#include <linux/kernel.h>
#include <traceevent/event-parse.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "evlist.h"
#include "evsel.h"
#include "session.h"
#include "tool.h"
#include "sort.h"
#include "util.h"
#include "cpumap.h"
#include "perf_regs.h"
#include "vdso.h"

static int perf_session__open(struct perf_session *self, bool force)
{
	struct stat input_stat;

	if (!strcmp(self->filename, "-")) {
		self->fd_pipe = true;
		self->fd = STDIN_FILENO;

		if (perf_session__read_header(self) < 0)
			pr_err("incompatible file format (rerun with -v to learn more)");

		return 0;
	}

	self->fd = open(self->filename, O_RDONLY);
	if (self->fd < 0) {
		int err = errno;

		pr_err("failed to open %s: %s", self->filename, strerror(err));
		if (err == ENOENT && !strcmp(self->filename, "perf.data"))
			pr_err(" (try 'perf record' first)");
		pr_err("\n");
		return -errno;
	}

	if (fstat(self->fd, &input_stat) < 0)
		goto out_close;

	if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
		pr_err("file %s not owned by current user or root\n",
		       self->filename);
		goto out_close;
	}

	if (!input_stat.st_size) {
		pr_info("zero-sized file (%s), nothing to do!\n",
			self->filename);
		goto out_close;
	}

	if (perf_session__read_header(self) < 0) {
		pr_err("incompatible file format (rerun with -v to learn more)");
		goto out_close;
	}

	if (!perf_evlist__valid_sample_type(self->evlist)) {
		pr_err("non matching sample_type");
		goto out_close;
	}

	if (!perf_evlist__valid_sample_id_all(self->evlist)) {
		pr_err("non matching sample_id_all");
		goto out_close;
	}

	self->size = input_stat.st_size;
	return 0;

out_close:
	close(self->fd);
	self->fd = -1;
	return -1;
}

void perf_session__set_id_hdr_size(struct perf_session *session)
{
	u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);

	machines__set_id_hdr_size(&session->machines, id_hdr_size);
}

int perf_session__create_kernel_maps(struct perf_session *self)
{
	int ret = machine__create_kernel_maps(&self->machines.host);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&self->machines);
	return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *self)
{
	machines__destroy_kernel_maps(&self->machines);
}

struct perf_session *perf_session__new(const char *filename, int mode,
				       bool force, bool repipe,
				       struct perf_tool *tool)
{
	struct perf_session *self;
	struct stat st;
	size_t len;

	if (!filename || !strlen(filename)) {
		if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
			filename = "-";
		else
			filename = "perf.data";
	}

	len = strlen(filename);
	self = zalloc(sizeof(*self) + len);

	if (self == NULL)
		goto out;

	memcpy(self->filename, filename, len);
	self->repipe = repipe;
	INIT_LIST_HEAD(&self->ordered_samples.samples);
	INIT_LIST_HEAD(&self->ordered_samples.sample_cache);
	INIT_LIST_HEAD(&self->ordered_samples.to_free);
	machines__init(&self->machines);

	if (mode == O_RDONLY) {
		if (perf_session__open(self, force) < 0)
			goto out_delete;
		perf_session__set_id_hdr_size(self);
	} else if (mode == O_WRONLY) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in perf_event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(self) < 0)
			goto out_delete;
	}

	if (tool && tool->ordering_requires_timestamps &&
	    tool->ordered_samples && !perf_evlist__sample_id_all(self->evlist)) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		tool->ordered_samples = false;
	}

out:
	return self;
out_delete:
	perf_session__delete(self);
	return NULL;
}
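
/*
 * Example usage (an illustrative sketch, not code from this file): a
 * read-side tool typically pairs perf_session__new() with
 * perf_session__process_events() and perf_session__delete():
 *
 *	struct perf_tool tool = {
 *		.sample		 = my_process_sample,	// tool-defined callback
 *		.ordered_samples = true,
 *	};
 *	struct perf_session *session;
 *
 *	session = perf_session__new("perf.data", O_RDONLY, false, false, &tool);
 *	if (session == NULL)
 *		return -1;
 *	err = perf_session__process_events(session, &tool);
 *	perf_session__delete(session);
 *
 * my_process_sample is a hypothetical handler; any callbacks left unset
 * are filled in with stubs by perf_tool__fill_defaults() before
 * processing starts.
 */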

static void perf_session__delete_dead_threads(struct perf_session *session)
{
	machine__delete_dead_threads(&session->machines.host);
}

static void perf_session__delete_threads(struct perf_session *session)
{
	machine__delete_threads(&session->machines.host);
}

static void perf_session_env__delete(struct perf_session_env *env)
{
	free(env->hostname);
	free(env->os_release);
	free(env->version);
	free(env->arch);
	free(env->cpu_desc);
	free(env->cpuid);

	free(env->cmdline);
	free(env->sibling_cores);
	free(env->sibling_threads);
	free(env->numa_nodes);
	free(env->pmu_mappings);
}

void perf_session__delete(struct perf_session *self)
{
	perf_session__destroy_kernel_maps(self);
	perf_session__delete_dead_threads(self);
	perf_session__delete_threads(self);
	perf_session_env__delete(&self->header.env);
	machines__exit(&self->machines);
	close(self->fd);
	free(self);
	vdso__exit();
}

static int process_event_synth_tracing_data_stub(struct perf_tool *tool
						 __maybe_unused,
						 union perf_event *event
						 __maybe_unused,
						 struct perf_session *session
						 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
					 union perf_event *event __maybe_unused,
					 struct perf_evlist **pevlist
					 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
				     union perf_event *event __maybe_unused,
				     struct perf_sample *sample __maybe_unused,
				     struct perf_evsel *evsel __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_stub(struct perf_tool *tool __maybe_unused,
			      union perf_event *event __maybe_unused,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
				       union perf_event *event __maybe_unused,
				       struct perf_session *perf_session
				       __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event,
				  struct perf_session *session);

static void perf_tool__fill_defaults(struct perf_tool *tool)
{
	if (tool->sample == NULL)
		tool->sample = process_event_sample_stub;
	if (tool->mmap == NULL)
		tool->mmap = process_event_stub;
	if (tool->comm == NULL)
		tool->comm = process_event_stub;
	if (tool->fork == NULL)
		tool->fork = process_event_stub;
	if (tool->exit == NULL)
		tool->exit = process_event_stub;
	if (tool->lost == NULL)
		tool->lost = perf_event__process_lost;
	if (tool->read == NULL)
		tool->read = process_event_sample_stub;
	if (tool->throttle == NULL)
		tool->throttle = process_event_stub;
	if (tool->unthrottle == NULL)
		tool->unthrottle = process_event_stub;
	if (tool->attr == NULL)
		tool->attr = process_event_synth_attr_stub;
	if (tool->tracing_data == NULL)
		tool->tracing_data = process_event_synth_tracing_data_stub;
	if (tool->build_id == NULL)
		tool->build_id = process_finished_round_stub;
	if (tool->finished_round == NULL) {
		if (tool->ordered_samples)
			tool->finished_round = process_finished_round;
		else
			tool->finished_round = process_finished_round_stub;
	}
}

void mem_bswap_32(void *src, int byte_size)
{
	u32 *m = src;

	while (byte_size > 0) {
		*m = bswap_32(*m);
		byte_size -= sizeof(u32);
		++m;
	}
}

void mem_bswap_64(void *src, int byte_size)
{
	u64 *m = src;

	while (byte_size > 0) {
		*m = bswap_64(*m);
		byte_size -= sizeof(u64);
		++m;
	}
}
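
/*
 * Note: both helpers assume byte_size is a multiple of the word size,
 * e.g. mem_bswap_64() turns one u64 0x0123456789abcdef into
 * 0xefcdab8967452301 per iteration; swap_sample_id_all() below relies
 * on this and BUG_ON()s if the sample_id trailer is not u64-aligned.
 */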

static void swap_sample_id_all(union perf_event *event, void *data)
{
	void *end = (void *) event + event->header.size;
	int size = end - data;

	BUG_ON(size % sizeof(u64));
	mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
				   bool sample_id_all __maybe_unused)
{
	struct perf_event_header *hdr = &event->header;

	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
	event->comm.pid = bswap_32(event->comm.pid);
	event->comm.tid = bswap_32(event->comm.tid);

	if (sample_id_all) {
		void *data = &event->comm.comm;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap_swap(union perf_event *event,
				  bool sample_id_all)
{
	event->mmap.pid = bswap_32(event->mmap.pid);
	event->mmap.tid = bswap_32(event->mmap.tid);
	event->mmap.start = bswap_64(event->mmap.start);
	event->mmap.len = bswap_64(event->mmap.len);
	event->mmap.pgoff = bswap_64(event->mmap.pgoff);

	if (sample_id_all) {
		void *data = &event->mmap.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
	event->fork.pid = bswap_32(event->fork.pid);
	event->fork.tid = bswap_32(event->fork.tid);
	event->fork.ppid = bswap_32(event->fork.ppid);
	event->fork.ptid = bswap_32(event->fork.ptid);
	event->fork.time = bswap_64(event->fork.time);

	if (sample_id_all)
		swap_sample_id_all(event, &event->fork + 1);
}

static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
	event->read.pid = bswap_32(event->read.pid);
	event->read.tid = bswap_32(event->read.tid);
	event->read.value = bswap_64(event->read.value);
	event->read.time_enabled = bswap_64(event->read.time_enabled);
	event->read.time_running = bswap_64(event->read.time_running);
	event->read.id = bswap_64(event->read.id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->read + 1);
}

static u8 revbyte(u8 b)
{
	int rev = (b >> 4) | ((b & 0xf) << 4);
	rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
	rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
	return (u8) rev;
}

/*
 * XXX this is a hack in an attempt to carry the flags bitfield
 * through the endian village. The ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above seems to be byte specific, so we need to reverse each
 * byte of the bitfield. 'Internet' also says this might be implementation
 * specific and we probably need a proper fix: carry the perf_event_attr
 * bitfield flags in a separate FEAT_ section of the data file. Though
 * this seems to work for now.
 */
static void swap_bitfield(u8 *p, unsigned len)
{
	unsigned i;

	for (i = 0; i < len; i++) {
		*p = revbyte(*p);
		p++;
	}
}
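
/*
 * Illustration (values are examples only): revbyte() mirrors the bit
 * order within one byte, so revbyte(0x12) == 0x48 (00010010b becomes
 * 01001000b), and swap_bitfield() applies that mirror to each byte of
 * the flags word in place.
 */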

/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
	attr->type = bswap_32(attr->type);
	attr->size = bswap_32(attr->size);
	attr->config = bswap_64(attr->config);
	attr->sample_period = bswap_64(attr->sample_period);
	attr->sample_type = bswap_64(attr->sample_type);
	attr->read_format = bswap_64(attr->read_format);
	attr->wakeup_events = bswap_32(attr->wakeup_events);
	attr->bp_type = bswap_32(attr->bp_type);
	attr->bp_addr = bswap_64(attr->bp_addr);
	attr->bp_len = bswap_64(attr->bp_len);

	swap_bitfield((u8 *) (&attr->read_format + 1), sizeof(u64));
}

static void perf_event__hdr_attr_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	size_t size;

	perf_event__attr_swap(&event->attr.attr);

	size = event->header.size;
	size -= (void *)&event->attr.id - (void *)event;
	mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_type_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->event_type.event_type.event_id =
		bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->tracing_data.size = bswap_32(event->tracing_data.size);
}

typedef void (*perf_event__swap_op)(union perf_event *event,
				    bool sample_id_all);

static perf_event__swap_op perf_event__swap_ops[] = {
	[PERF_RECORD_MMAP] = perf_event__mmap_swap,
	[PERF_RECORD_COMM] = perf_event__comm_swap,
	[PERF_RECORD_FORK] = perf_event__task_swap,
	[PERF_RECORD_EXIT] = perf_event__task_swap,
	[PERF_RECORD_LOST] = perf_event__all64_swap,
	[PERF_RECORD_READ] = perf_event__read_swap,
	[PERF_RECORD_SAMPLE] = perf_event__all64_swap,
	[PERF_RECORD_HEADER_ATTR] = perf_event__hdr_attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID] = NULL,
	[PERF_RECORD_HEADER_MAX] = NULL,
};
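
/*
 * event_swap() below indexes this table by event->header.type; a NULL
 * slot means the record body is left untouched here (the header itself
 * is byte-swapped separately via perf_event_header__bswap() by the
 * readers).
 */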

struct sample_queue {
	u64 timestamp;
	u64 file_offset;
	union perf_event *event;
	struct list_head list;
};

static void perf_session_free_sample_buffers(struct perf_session *session)
{
	struct ordered_samples *os = &session->ordered_samples;

	while (!list_empty(&os->to_free)) {
		struct sample_queue *sq;

		sq = list_entry(os->to_free.next, struct sample_queue, list);
		list_del(&sq->list);
		free(sq);
	}
}

static int perf_session_deliver_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample,
				      struct perf_tool *tool,
				      u64 file_offset);

static int flush_sample_queue(struct perf_session *s,
			      struct perf_tool *tool)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct list_head *head = &os->samples;
	struct sample_queue *tmp, *iter;
	struct perf_sample sample;
	u64 limit = os->next_flush;
	u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL;
	unsigned idx = 0, progress_next = os->nr_samples / 16;
	int ret;

	if (!tool->ordered_samples || !limit)
		return 0;

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (iter->timestamp > limit)
			break;

		ret = perf_evlist__parse_sample(s->evlist, iter->event, &sample);
		if (ret)
			pr_err("Can't parse sample, err = %d\n", ret);
		else {
			ret = perf_session_deliver_event(s, iter->event, &sample, tool,
							 iter->file_offset);
			if (ret)
				return ret;
		}

		os->last_flush = iter->timestamp;
		list_del(&iter->list);
		list_add(&iter->list, &os->sample_cache);
		if (++idx >= progress_next) {
			progress_next += os->nr_samples / 16;
			ui_progress__update(idx, os->nr_samples,
					    "Processing time ordered events...");
		}
	}

	if (list_empty(head)) {
		os->last_sample = NULL;
	} else if (last_ts <= limit) {
		os->last_sample =
			list_entry(head->prev, struct sample_queue, list);
	}

	os->nr_samples = 0;

	return 0;
}

/*
 * When perf record finishes a pass on every buffer, it records this pseudo
 * event.
 * We record the max timestamp t found in pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and then read in pass n + 1.
 * Hence when we start to read pass n + 2, we can safely flush every
 * event with a timestamp below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush every event below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush every event below timestamp 7
 *      etc...
 */
static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event __maybe_unused,
				  struct perf_session *session)
{
	int ret = flush_sample_queue(session, tool);
	if (!ret)
		session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;

	return ret;
}

/* The queue is ordered by time */
static void __queue_event(struct sample_queue *new, struct perf_session *s)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct sample_queue *sample = os->last_sample;
	u64 timestamp = new->timestamp;
	struct list_head *p;

	++os->nr_samples;
	os->last_sample = new;

	if (!sample) {
		list_add(&new->list, &os->samples);
		os->max_timestamp = timestamp;
		return;
	}

	/*
	 * last_sample might point to some random place in the list as it's
	 * the last queued event. We expect that the new event is close to
	 * this.
	 */
	if (sample->timestamp <= timestamp) {
		while (sample->timestamp <= timestamp) {
			p = sample->list.next;
			if (p == &os->samples) {
				list_add_tail(&new->list, &os->samples);
				os->max_timestamp = timestamp;
				return;
			}
			sample = list_entry(p, struct sample_queue, list);
		}
		list_add_tail(&new->list, &sample->list);
	} else {
		while (sample->timestamp > timestamp) {
			p = sample->list.prev;
			if (p == &os->samples) {
				list_add(&new->list, &os->samples);
				return;
			}
			sample = list_entry(p, struct sample_queue, list);
		}
		list_add(&new->list, &sample->list);
	}
}

#define MAX_SAMPLE_BUFFER	(64 * 1024 / sizeof(struct sample_queue))

static int perf_session_queue_event(struct perf_session *s, union perf_event *event,
				    struct perf_sample *sample, u64 file_offset)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct list_head *sc = &os->sample_cache;
	u64 timestamp = sample->time;
	struct sample_queue *new;

	if (!timestamp || timestamp == ~0ULL)
		return -ETIME;

	if (timestamp < s->ordered_samples.last_flush) {
		printf("Warning: Timestamp below last timeslice flush\n");
		return -EINVAL;
	}

	if (!list_empty(sc)) {
		new = list_entry(sc->next, struct sample_queue, list);
		list_del(&new->list);
	} else if (os->sample_buffer) {
		new = os->sample_buffer + os->sample_buffer_idx;
		if (++os->sample_buffer_idx == MAX_SAMPLE_BUFFER)
			os->sample_buffer = NULL;
	} else {
		os->sample_buffer = malloc(MAX_SAMPLE_BUFFER * sizeof(*new));
		if (!os->sample_buffer)
			return -ENOMEM;
		list_add(&os->sample_buffer->list, &os->to_free);
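		/*
		 * Entry 0 of each buffer is reserved: its embedded list
		 * node was just used to link the whole allocation onto
		 * os->to_free, so handing out entries starts at slot 1
		 * and the next-free index at 2.
		 */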
		os->sample_buffer_idx = 2;
		new = os->sample_buffer + 1;
	}

	new->timestamp = timestamp;
	new->file_offset = file_offset;
	new->event = event;

	__queue_event(new, s);

	return 0;
}
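
/*
 * Note that -ETIME from perf_session_queue_event() is not fatal:
 * perf_session__process_event() treats it as "no usable timestamp" and
 * falls back to delivering the event immediately, unordered.
 */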

static void callchain__printf(struct perf_sample *sample)
{
	unsigned int i;

	printf("... chain: nr:%" PRIu64 "\n", sample->callchain->nr);

	for (i = 0; i < sample->callchain->nr; i++)
		printf("..... %2d: %016" PRIx64 "\n",
		       i, sample->callchain->ips[i]);
}

static void branch_stack__printf(struct perf_sample *sample)
{
	uint64_t i;

	printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr);

	for (i = 0; i < sample->branch_stack->nr; i++)
		printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 "\n",
		       i, sample->branch_stack->entries[i].from,
		       sample->branch_stack->entries[i].to);
}

static void regs_dump__printf(u64 mask, u64 *regs)
{
	unsigned rid, i = 0;

	for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
		u64 val = regs[i++];

		printf(".... %-5s 0x%" PRIx64 "\n",
		       perf_reg_name(rid), val);
	}
}

static void regs_user__printf(struct perf_sample *sample, u64 mask)
{
	struct regs_dump *user_regs = &sample->user_regs;

	if (user_regs->regs) {
		printf("... user regs: mask 0x%" PRIx64 "\n", mask);
		regs_dump__printf(mask, user_regs->regs);
	}
}

static void stack_user__printf(struct stack_dump *dump)
{
	printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
	       dump->size, dump->offset);
}

static void perf_session__print_tstamp(struct perf_session *session,
				       union perf_event *event,
				       struct perf_sample *sample)
{
	u64 sample_type = perf_evlist__sample_type(session->evlist);

	if (event->header.type != PERF_RECORD_SAMPLE &&
	    !perf_evlist__sample_id_all(session->evlist)) {
		fputs("-1 -1 ", stdout);
		return;
	}

	if ((sample_type & PERF_SAMPLE_CPU))
		printf("%u ", sample->cpu);

	if (sample_type & PERF_SAMPLE_TIME)
		printf("%" PRIu64 " ", sample->time);
}

static void dump_event(struct perf_session *session, union perf_event *event,
		       u64 file_offset, struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("\n%#" PRIx64 " [%#x]: event: %d\n",
	       file_offset, event->header.size, event->header.type);

	trace_event(event);

	if (sample)
		perf_session__print_tstamp(session, event, sample);

	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
	       event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
			struct perf_sample *sample)
{
	u64 sample_type;

	if (!dump_trace)
		return;

	printf("(IP, %d): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
	       event->header.misc, sample->pid, sample->tid, sample->ip,
	       sample->period, sample->addr);

	sample_type = evsel->attr.sample_type;

	if (sample_type & PERF_SAMPLE_CALLCHAIN)
		callchain__printf(sample);

	if (sample_type & PERF_SAMPLE_BRANCH_STACK)
		branch_stack__printf(sample);

	if (sample_type & PERF_SAMPLE_REGS_USER)
		regs_user__printf(sample, evsel->attr.sample_regs_user);

	if (sample_type & PERF_SAMPLE_STACK_USER)
		stack_user__printf(&sample->user_stack);

	if (sample_type & PERF_SAMPLE_WEIGHT)
		printf("... weight: %" PRIu64 "\n", sample->weight);

	if (sample_type & PERF_SAMPLE_DATA_SRC)
		printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);
}

static struct machine *
perf_session__find_machine_for_cpumode(struct perf_session *session,
				       union perf_event *event)
{
	const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	if (perf_guest &&
	    ((cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
	     (cpumode == PERF_RECORD_MISC_GUEST_USER))) {
		u32 pid;

		if (event->header.type == PERF_RECORD_MMAP)
			pid = event->mmap.pid;
		else
			pid = event->ip.pid;

		return perf_session__findnew_machine(session, pid);
	}

	return &session->machines.host;
}

static int perf_session_deliver_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample,
				      struct perf_tool *tool,
				      u64 file_offset)
{
	struct perf_evsel *evsel;
	struct machine *machine;

	dump_event(session, event, file_offset, sample);

	evsel = perf_evlist__id2evsel(session->evlist, sample->id);
	if (evsel != NULL && event->header.type != PERF_RECORD_SAMPLE) {
		/*
		 * XXX We're leaving PERF_RECORD_SAMPLE unaccounted here
		 * because the tools right now may apply filters, discarding
		 * some of the samples. For consistency, in the future we
		 * should have something like nr_filtered_samples and remove
		 * the sample->period from total_sample_period, etc, KISS for
		 * now though.
		 *
		 * Also testing against NULL allows us to handle files without
		 * attr.sample_id_all and/or without PERF_SAMPLE_ID. In the
		 * future it'll probably be a good idea to restrict event
		 * processing via perf_session to files with both set.
		 */
		hists__inc_nr_events(&evsel->hists, event->header.type);
	}

	machine = perf_session__find_machine_for_cpumode(session, event);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		dump_sample(evsel, event, sample);
		if (evsel == NULL) {
			++session->stats.nr_unknown_id;
			return 0;
		}
		if (machine == NULL) {
			++session->stats.nr_unprocessable_samples;
			return 0;
		}
		return tool->sample(tool, event, sample, evsel, machine);
	case PERF_RECORD_MMAP:
		return tool->mmap(tool, event, sample, machine);
	case PERF_RECORD_COMM:
		return tool->comm(tool, event, sample, machine);
	case PERF_RECORD_FORK:
		return tool->fork(tool, event, sample, machine);
	case PERF_RECORD_EXIT:
		return tool->exit(tool, event, sample, machine);
	case PERF_RECORD_LOST:
		if (tool->lost == perf_event__process_lost)
			session->stats.total_lost += event->lost.lost;
		return tool->lost(tool, event, sample, machine);
	case PERF_RECORD_READ:
		return tool->read(tool, event, sample, evsel, machine);
	case PERF_RECORD_THROTTLE:
		return tool->throttle(tool, event, sample, machine);
	case PERF_RECORD_UNTHROTTLE:
		return tool->unthrottle(tool, event, sample, machine);
	default:
		++session->stats.nr_unknown_events;
		return -1;
	}
}

static int perf_session__preprocess_sample(struct perf_session *session,
					   union perf_event *event, struct perf_sample *sample)
{
	if (event->header.type != PERF_RECORD_SAMPLE ||
	    !(perf_evlist__sample_type(session->evlist) & PERF_SAMPLE_CALLCHAIN))
		return 0;

	if (!ip_callchain__valid(sample->callchain, event)) {
		pr_debug("call-chain problem with event, skipping it.\n");
		++session->stats.nr_invalid_chains;
		session->stats.total_invalid_chains += sample->period;
		return -EINVAL;
	}
	return 0;
}

static int perf_session__process_user_event(struct perf_session *session, union perf_event *event,
					    struct perf_tool *tool, u64 file_offset)
{
	int err;

	dump_event(session, event, file_offset, NULL);

	/* These events are processed right away */
	switch (event->header.type) {
	case PERF_RECORD_HEADER_ATTR:
		err = tool->attr(tool, event, &session->evlist);
		if (err == 0)
			perf_session__set_id_hdr_size(session);
		return err;
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* setup for reading amidst mmap */
		lseek(session->fd, file_offset, SEEK_SET);
		return tool->tracing_data(tool, event, session);
	case PERF_RECORD_HEADER_BUILD_ID:
		return tool->build_id(tool, event, session);
	case PERF_RECORD_FINISHED_ROUND:
		return tool->finished_round(tool, event, session);
	default:
		return -EINVAL;
	}
}

static void event_swap(union perf_event *event, bool sample_id_all)
{
	perf_event__swap_op swap;

	swap = perf_event__swap_ops[event->header.type];
	if (swap)
		swap(event, sample_id_all);
}

static int perf_session__process_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_tool *tool,
				       u64 file_offset)
{
	struct perf_sample sample;
	int ret;

	if (session->header.needs_swap)
		event_swap(event, perf_evlist__sample_id_all(session->evlist));

	if (event->header.type >= PERF_RECORD_HEADER_MAX)
		return -EINVAL;

	events_stats__inc(&session->stats, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, tool, file_offset);

	/*
	 * For all kernel events we get the sample data
	 */
	ret = perf_evlist__parse_sample(session->evlist, event, &sample);
	if (ret)
		return ret;

	/* Preprocess sample records - precheck callchains */
	if (perf_session__preprocess_sample(session, event, &sample))
		return 0;

	if (tool->ordered_samples) {
		ret = perf_session_queue_event(session, event, &sample,
					       file_offset);
		if (ret != -ETIME)
			return ret;
	}

	return perf_session_deliver_event(session, event, &sample, tool,
					  file_offset);
}

void perf_event_header__bswap(struct perf_event_header *self)
{
	self->type = bswap_32(self->type);
	self->misc = bswap_16(self->misc);
	self->size = bswap_16(self->size);
}

struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
{
	return machine__findnew_thread(&session->machines.host, pid);
}

static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
	struct thread *thread = perf_session__findnew(self, 0);

	if (thread == NULL || thread__set_comm(thread, "swapper")) {
		pr_err("problem inserting idle task.\n");
		thread = NULL;
	}

	return thread;
}

static void perf_session__warn_about_errors(const struct perf_session *session,
					    const struct perf_tool *tool)
{
	if (tool->lost == perf_event__process_lost &&
	    session->stats.nr_events[PERF_RECORD_LOST] != 0) {
		ui__warning("Processed %d events and lost %d chunks!\n\n"
			    "Check IO/CPU overload!\n\n",
			    session->stats.nr_events[0],
			    session->stats.nr_events[PERF_RECORD_LOST]);
	}

	if (session->stats.nr_unknown_events != 0) {
		ui__warning("Found %u unknown events!\n\n"
			    "Is this an older tool processing a perf.data "
			    "file generated by a more recent tool?\n\n"
			    "If that is not the case, consider "
			    "reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->stats.nr_unknown_events);
	}

	if (session->stats.nr_unknown_id != 0) {
		ui__warning("%u samples with id not present in the header\n",
			    session->stats.nr_unknown_id);
	}

	if (session->stats.nr_invalid_chains != 0) {
		ui__warning("Found invalid callchains!\n\n"
			    "%u out of %u events were discarded for this reason.\n\n"
			    "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->stats.nr_invalid_chains,
			    session->stats.nr_events[PERF_RECORD_SAMPLE]);
	}

	if (session->stats.nr_unprocessable_samples != 0) {
		ui__warning("%u unprocessable samples recorded.\n"
			    "Do you have a KVM guest running and not using 'perf kvm'?\n",
			    session->stats.nr_unprocessable_samples);
	}
}

#define session_done()	(*(volatile int *)(&session_done))
volatile int session_done;

static int __perf_session__process_pipe_events(struct perf_session *self,
					       struct perf_tool *tool)
{
	union perf_event *event;
	uint32_t size, cur_size = 0;
	void *buf = NULL;
	int skip = 0;
	u64 head;
	int err;
	void *p;

	perf_tool__fill_defaults(tool);

	head = 0;
	cur_size = sizeof(union perf_event);

	buf = malloc(cur_size);
	if (!buf)
		return -errno;
more:
	event = buf;
	err = readn(self->fd, event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (self->header.needs_swap)
		perf_event_header__bswap(&event->header);

	size = event->header.size;
	if (size < sizeof(struct perf_event_header)) {
		pr_err("bad event header size\n");
		goto out_err;
	}

	if (size > cur_size) {
		void *new = realloc(buf, size);
		if (!new) {
			pr_err("failed to allocate memory to read event\n");
			goto out_err;
		}
		buf = new;
		cur_size = size;
		event = buf;
	}
	p = event;
	p += sizeof(struct perf_event_header);

	if (size - sizeof(struct perf_event_header)) {
		err = readn(self->fd, p, size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if ((skip = perf_session__process_event(self, event, tool, head)) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       head, event->header.size, event->header.type);
		err = -EINVAL;
		goto out_err;
	}

	head += size;

	if (skip > 0)
		head += skip;

	if (!session_done())
		goto more;
done:
	err = 0;
out_err:
	free(buf);
	perf_session__warn_about_errors(self, tool);
	perf_session_free_sample_buffers(self);
	return err;
}

static union perf_event *
fetch_mmaped_event(struct perf_session *session,
		   u64 head, size_t mmap_size, char *buf)
{
	union perf_event *event;

	/*
	 * Ensure we have enough space remaining to read
	 * the size of the event in the headers.
	 */
	if (head + sizeof(event->header) > mmap_size)
		return NULL;

	event = (union perf_event *)(buf + head);

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (head + event->header.size > mmap_size) {
		/* We're not fetching the event so swap back again */
		if (session->header.needs_swap)
			perf_event_header__bswap(&event->header);
		return NULL;
	}

	return event;
}

/*
 * On 64bit we can mmap the data file in one go. No need for tiny mmap
 * slices. On 32bit we use 32MB.
 */
#if BITS_PER_LONG == 64
#define MMAP_SIZE ULLONG_MAX
#define NUM_MMAPS 1
#else
#define MMAP_SIZE (32 * 1024 * 1024ULL)
#define NUM_MMAPS 128
#endif
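
/*
 * NUM_MMAPS must remain a power of two: __perf_session__process_events()
 * cycles map_idx with "& (ARRAY_SIZE(mmaps) - 1)" instead of a modulo.
 */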

int __perf_session__process_events(struct perf_session *session,
				   u64 data_offset, u64 data_size,
				   u64 file_size, struct perf_tool *tool)
{
	u64 head, page_offset, file_offset, file_pos, progress_next;
	int err, mmap_prot, mmap_flags, map_idx = 0;
	size_t mmap_size;
	char *buf, *mmaps[NUM_MMAPS];
	union perf_event *event;
	uint32_t size;

	perf_tool__fill_defaults(tool);

	page_offset = page_size * (data_offset / page_size);
	file_offset = page_offset;
	head = data_offset - page_offset;

	if (data_offset + data_size < file_size)
		file_size = data_offset + data_size;

	progress_next = file_size / 16;

	mmap_size = MMAP_SIZE;
	if (mmap_size > file_size)
		mmap_size = file_size;

	memset(mmaps, 0, sizeof(mmaps));

	mmap_prot  = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (session->header.needs_swap) {
		mmap_prot  |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, session->fd,
		   file_offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}
	mmaps[map_idx] = buf;
	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
	file_pos = file_offset + head;

more:
	event = fetch_mmaped_event(session, head, mmap_size, buf);
	if (!event) {
		if (mmaps[map_idx]) {
			munmap(mmaps[map_idx], mmap_size);
			mmaps[map_idx] = NULL;
		}

		page_offset = page_size * (head / page_size);
		file_offset += page_offset;
		head -= page_offset;
		goto remap;
	}

	size = event->header.size;

	if (size < sizeof(struct perf_event_header) ||
	    perf_session__process_event(session, event, tool, file_pos) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       file_offset + head, event->header.size,
		       event->header.type);
		err = -EINVAL;
		goto out_err;
	}

	head += size;
	file_pos += size;

	if (file_pos >= progress_next) {
		progress_next += file_size / 16;
		ui_progress__update(file_pos, file_size,
				    "Processing events...");
	}

	if (file_pos < file_size)
		goto more;

	err = 0;
	/* do the final flush for ordered samples */
	session->ordered_samples.next_flush = ULLONG_MAX;
	err = flush_sample_queue(session, tool);
out_err:
	ui_progress__finish();
	perf_session__warn_about_errors(session, tool);
	perf_session_free_sample_buffers(session);
	return err;
}
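
/*
 * A worked example of the alignment math above (numbers illustrative):
 * with page_size = 4096 and data_offset = 4404, page_offset is 4096, so
 * the mapping starts at file_offset = 4096 and the first event is read
 * at head = 308 inside it. Each remap advances file_offset by a
 * page-aligned amount and keeps the residue in head, so mmap() always
 * receives the page-aligned offset it requires.
 */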

int perf_session__process_events(struct perf_session *self,
				 struct perf_tool *tool)
{
	int err;

	if (perf_session__register_idle_thread(self) == NULL)
		return -ENOMEM;

	if (!self->fd_pipe)
		err = __perf_session__process_events(self,
						     self->header.data_offset,
						     self->header.data_size,
						     self->size, tool);
	else
		err = __perf_session__process_pipe_events(self, tool);

	return err;
}

bool perf_session__has_traces(struct perf_session *session, const char *msg)
{
	if (!(perf_evlist__sample_type(session->evlist) & PERF_SAMPLE_RAW)) {
		pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
		return false;
	}

	return true;
}

int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
				     const char *symbol_name, u64 addr)
{
	char *bracket;
	enum map_type i;
	struct ref_reloc_sym *ref;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct kmap *kmap = map__kmap(maps[i]);
		kmap->ref_reloc_sym = ref;
	}

	return 0;
}

size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
{
	return machines__fprintf_dsos(&self->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
					  bool (skip)(struct dso *dso, int parm), int parm)
{
	return machines__fprintf_dsos_buildid(&self->machines, fp, skip, parm);
}

size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = fprintf(fp, "Aggregated stats:\n");

	ret += events_stats__fprintf(&session->stats, fp);

	list_for_each_entry(pos, &session->evlist->entries, node) {
		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
		ret += events_stats__fprintf(&pos->hists.stats, fp);
	}

	return ret;
}

size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
{
	/*
	 * FIXME: Here we have to actually print all the machines in this
	 * session, not just the host...
	 */
	return machine__fprintf(&session->machines.host, fp);
}

struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
						   unsigned int type)
{
	struct perf_evsel *pos;

	list_for_each_entry(pos, &session->evlist->entries, node) {
		if (pos->attr.type == type)
			return pos;
	}

	return NULL;
}

void perf_evsel__print_ip(struct perf_evsel *evsel, union perf_event *event,
			  struct perf_sample *sample, struct machine *machine,
			  int print_sym, int print_dso, int print_symoffset)
{
	struct addr_location al;
	struct callchain_cursor_node *node;

	if (perf_event__preprocess_sample(event, machine, &al, sample,
					  NULL) < 0) {
		error("problem processing %d event, skipping it.\n",
		      event->header.type);
		return;
	}

	if (symbol_conf.use_callchain && sample->callchain) {

		if (machine__resolve_callchain(machine, evsel, al.thread,
					       sample, NULL, NULL) != 0) {
			if (verbose)
				error("Failed to resolve callchain. Skipping\n");
			return;
		}
		callchain_cursor_commit(&callchain_cursor);

		while (1) {
			node = callchain_cursor_current(&callchain_cursor);
			if (!node)
				break;

			printf("\t%16" PRIx64, node->ip);
			if (print_sym) {
				printf(" ");
				symbol__fprintf_symname(node->sym, stdout);
			}
			if (print_dso) {
				printf(" (");
				map__fprintf_dsoname(node->map, stdout);
				printf(")");
			}
			printf("\n");

			callchain_cursor_advance(&callchain_cursor);
		}

	} else {
		printf("%16" PRIx64, sample->ip);
		if (print_sym) {
			printf(" ");
			if (print_symoffset)
				symbol__fprintf_symname_offs(al.sym, &al,
							     stdout);
			else
				symbol__fprintf_symname(al.sym, stdout);
		}

		if (print_dso) {
			printf(" (");
			map__fprintf_dsoname(al.map, stdout);
			printf(")");
		}
	}
}

int perf_session__cpu_bitmap(struct perf_session *session,
			     const char *cpu_list, unsigned long *cpu_bitmap)
{
	int i;
	struct cpu_map *map;

	for (i = 0; i < PERF_TYPE_MAX; ++i) {
		struct perf_evsel *evsel;

		evsel = perf_session__find_first_evtype(session, i);
		if (!evsel)
			continue;

		if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
			pr_err("File does not contain CPU events. "
			       "Remove -c option to proceed.\n");
			return -1;
		}
	}

	map = cpu_map__new(cpu_list);
	if (map == NULL) {
		pr_err("Invalid cpu_list\n");
		return -1;
	}

	for (i = 0; i < map->nr; i++) {
		int cpu = map->map[i];

		if (cpu >= MAX_NR_CPUS) {
			pr_err("Requested CPU %d too large. "
			       "Consider raising MAX_NR_CPUS\n", cpu);
			return -1;
		}

		set_bit(cpu, cpu_bitmap);
	}

	return 0;
}
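
/*
 * Example usage (a sketch; the helper is this file's, the caller code is
 * illustrative): a builtin taking a --cpu list might do
 *
 *	unsigned long cpu_bitmap[MAX_NR_CPUS / BITS_PER_LONG + 1] = { 0 };
 *
 *	if (cpu_list &&
 *	    perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap) < 0)
 *		return -1;
 *
 * and then test_bit(sample->cpu, cpu_bitmap) while handling samples to
 * skip CPUs outside the requested set.
 */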

void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
				bool full)
{
	struct stat st;
	int ret;

	if (session == NULL || fp == NULL)
		return;

	ret = fstat(session->fd, &st);
	if (ret == -1)
		return;

	fprintf(fp, "# ========\n");
	fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));
	perf_header__fprintf_info(session, fp, full);
	fprintf(fp, "# ========\n#\n");
}

int __perf_session__set_tracepoints_handlers(struct perf_session *session,
					     const struct perf_evsel_str_handler *assocs,
					     size_t nr_assocs)
{
	struct perf_evlist *evlist = session->evlist;
	struct event_format *format;
	struct perf_evsel *evsel;
	char *tracepoint, *name;
	size_t i;
	int err;

	for (i = 0; i < nr_assocs; i++) {
		err = -ENOMEM;
		tracepoint = strdup(assocs[i].name);
		if (tracepoint == NULL)
			goto out;

		err = -ENOENT;
		name = strchr(tracepoint, ':');
		if (name == NULL)
			goto out_free;

		*name++ = '\0';
		format = pevent_find_event_by_name(session->pevent,
						   tracepoint, name);
		if (format == NULL) {
			/*
			 * Adding a handler for an event not in the session,
			 * just ignore it.
			 */
			goto next;
		}

		evsel = perf_evlist__find_tracepoint_by_id(evlist, format->id);
		if (evsel == NULL)
			goto next;

		err = -EEXIST;
		if (evsel->handler.func != NULL)
			goto out_free;
		evsel->handler.func = assocs[i].handler;
next:
		free(tracepoint);
	}

	err = 0;
out:
	return err;
out_free:
	free(tracepoint);
	goto out;
}
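
/*
 * Example usage (a sketch; the handler name is hypothetical): callers
 * usually pass a static table mapping tracepoint names to callbacks,
 * e.g.
 *
 *	static const struct perf_evsel_str_handler handlers[] = {
 *		{ "sched:sched_switch", process_sched_switch_event },
 *	};
 *
 *	if (__perf_session__set_tracepoints_handlers(session, handlers,
 *						     ARRAY_SIZE(handlers)))
 *		goto out_delete;
 *
 * Handlers naming events absent from the session are silently ignored,
 * matching the "goto next" above.
 */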