/* session.c */

#define _FILE_OFFSET_BITS 64

#include <linux/kernel.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "evlist.h"
#include "evsel.h"
#include "session.h"
#include "sort.h"
#include "util.h"
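
/*
 * Open the session's input (stdin when the filename is "-", a regular
 * file otherwise) and validate its header.  Note that in pipe mode a
 * header mismatch is only reported, not treated as fatal.
 */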
static int perf_session__open(struct perf_session *self, bool force)
{
        struct stat input_stat;

        if (!strcmp(self->filename, "-")) {
                self->fd_pipe = true;
                self->fd = STDIN_FILENO;

                if (perf_session__read_header(self, self->fd) < 0)
                        pr_err("incompatible file format");

                return 0;
        }

        self->fd = open(self->filename, O_RDONLY);
        if (self->fd < 0) {
                int err = errno;

                pr_err("failed to open %s: %s", self->filename, strerror(err));
                if (err == ENOENT && !strcmp(self->filename, "perf.data"))
                        pr_err(" (try 'perf record' first)");
                pr_err("\n");
                return -err;    /* use the saved errno; pr_err() may clobber it */
        }

        if (fstat(self->fd, &input_stat) < 0)
                goto out_close;

        if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
                pr_err("file %s not owned by current user or root\n",
                       self->filename);
                goto out_close;
        }

        if (!input_stat.st_size) {
                pr_info("zero-sized file (%s), nothing to do!\n",
                        self->filename);
                goto out_close;
        }

        if (perf_session__read_header(self, self->fd) < 0) {
                pr_err("incompatible file format");
                goto out_close;
        }

        self->size = input_stat.st_size;
        return 0;

out_close:
        close(self->fd);
        self->fd = -1;
        return -1;
}
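
/*
 * Compute the number of trailing bytes that sample_id_all appends to
 * every non-sample event, based on which PERF_SAMPLE_* bits are set.
 */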
static void perf_session__id_header_size(struct perf_session *session)
{
        struct perf_sample *data;
        u64 sample_type = session->sample_type;
        u16 size = 0;

        if (!session->sample_id_all)
                goto out;

        if (sample_type & PERF_SAMPLE_TID)
                size += sizeof(data->tid) * 2;

        if (sample_type & PERF_SAMPLE_TIME)
                size += sizeof(data->time);

        if (sample_type & PERF_SAMPLE_ID)
                size += sizeof(data->id);

        if (sample_type & PERF_SAMPLE_STREAM_ID)
                size += sizeof(data->stream_id);

        if (sample_type & PERF_SAMPLE_CPU)
                size += sizeof(data->cpu) * 2;
out:
        session->id_hdr_size = size;
}
void perf_session__update_sample_type(struct perf_session *self)
{
        self->sample_type = perf_evlist__sample_type(self->evlist);
        self->sample_id_all = perf_evlist__sample_id_all(self->evlist);
        perf_session__id_header_size(self);
}

int perf_session__create_kernel_maps(struct perf_session *self)
{
        int ret = machine__create_kernel_maps(&self->host_machine);

        if (ret >= 0)
                ret = machines__create_guest_kernel_maps(&self->machines);
        return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *self)
{
        machine__destroy_kernel_maps(&self->host_machine);
        machines__destroy_guest_kernel_maps(&self->machines);
}
struct perf_session *perf_session__new(const char *filename, int mode,
                                       bool force, bool repipe,
                                       struct perf_event_ops *ops)
{
        size_t len = filename ? strlen(filename) + 1 : 0;
        struct perf_session *self = zalloc(sizeof(*self) + len);

        if (self == NULL)
                goto out;

        memcpy(self->filename, filename, len);
        self->threads = RB_ROOT;
        INIT_LIST_HEAD(&self->dead_threads);
        self->last_match = NULL;
        /*
         * On 64bit we can mmap the data file in one go. No need for tiny mmap
         * slices. On 32bit we use 32MB.
         */
#if BITS_PER_LONG == 64
        self->mmap_window = ULLONG_MAX;
#else
        self->mmap_window = 32 * 1024 * 1024ULL;
#endif
        self->machines = RB_ROOT;
        self->repipe = repipe;
        INIT_LIST_HEAD(&self->ordered_samples.samples);
        INIT_LIST_HEAD(&self->ordered_samples.sample_cache);
        INIT_LIST_HEAD(&self->ordered_samples.to_free);
        machine__init(&self->host_machine, "", HOST_KERNEL_ID);

        if (mode == O_RDONLY) {
                if (perf_session__open(self, force) < 0)
                        goto out_delete;
                perf_session__update_sample_type(self);
        } else if (mode == O_WRONLY) {
                /*
                 * In O_RDONLY mode this will be performed when reading the
                 * kernel MMAP event, in perf_event__process_mmap().
                 */
                if (perf_session__create_kernel_maps(self) < 0)
                        goto out_delete;
        }

        if (ops && ops->ordering_requires_timestamps &&
            ops->ordered_samples && !self->sample_id_all) {
                dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
                ops->ordered_samples = false;
        }

out:
        return self;
out_delete:
        perf_session__delete(self);
        return NULL;
}
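
/*
 * Typical session lifecycle, shown here for illustration only (this
 * example is not part of the original file; "my_sample" is a made-up
 * callback name):
 *
 *      static int my_sample(union perf_event *event,
 *                           struct perf_sample *sample,
 *                           struct perf_session *session)
 *      {
 *              return 0;
 *      }
 *
 *      struct perf_event_ops ops = { .sample = my_sample };
 *      struct perf_session *s;
 *
 *      s = perf_session__new("perf.data", O_RDONLY, false, false, &ops);
 *      if (s != NULL) {
 *              perf_session__process_events(s, &ops);
 *              perf_session__delete(s);
 *      }
 *
 * Callbacks left NULL are filled in with default handlers by
 * perf_event_ops__fill_defaults() before processing starts.
 */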
static void perf_session__delete_dead_threads(struct perf_session *self)
{
        struct thread *n, *t;

        list_for_each_entry_safe(t, n, &self->dead_threads, node) {
                list_del(&t->node);
                thread__delete(t);
        }
}

static void perf_session__delete_threads(struct perf_session *self)
{
        struct rb_node *nd = rb_first(&self->threads);

        while (nd) {
                struct thread *t = rb_entry(nd, struct thread, rb_node);

                rb_erase(&t->rb_node, &self->threads);
                nd = rb_next(nd);
                thread__delete(t);
        }
}

void perf_session__delete(struct perf_session *self)
{
        perf_session__destroy_kernel_maps(self);
        perf_session__delete_dead_threads(self);
        perf_session__delete_threads(self);
        machine__exit(&self->host_machine);
        close(self->fd);
        free(self);
}
void perf_session__remove_thread(struct perf_session *self, struct thread *th)
{
        self->last_match = NULL;
        rb_erase(&th->rb_node, &self->threads);
        /*
         * We may have references to this thread, for instance in some
         * hist_entry instances, so just move them to a separate list.
         */
        list_add_tail(&th->node, &self->dead_threads);
}

static bool symbol__match_parent_regex(struct symbol *sym)
{
        if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
                return 1;

        return 0;
}
int perf_session__resolve_callchain(struct perf_session *self,
                                    struct thread *thread,
                                    struct ip_callchain *chain,
                                    struct symbol **parent)
{
        u8 cpumode = PERF_RECORD_MISC_USER;
        unsigned int i;
        int err;

        callchain_cursor_reset(&self->callchain_cursor);

        for (i = 0; i < chain->nr; i++) {
                u64 ip = chain->ips[i];
                struct addr_location al;

                if (ip >= PERF_CONTEXT_MAX) {
                        switch (ip) {
                        case PERF_CONTEXT_HV:
                                cpumode = PERF_RECORD_MISC_HYPERVISOR;
                                break;
                        case PERF_CONTEXT_KERNEL:
                                cpumode = PERF_RECORD_MISC_KERNEL;
                                break;
                        case PERF_CONTEXT_USER:
                                cpumode = PERF_RECORD_MISC_USER;
                                break;
                        default:
                                break;
                        }
                        continue;
                }

                al.filtered = false;
                thread__find_addr_location(thread, self, cpumode,
                                           MAP__FUNCTION, thread->pid, ip,
                                           &al, NULL);
                if (al.sym != NULL) {
                        if (sort__has_parent && !*parent &&
                            symbol__match_parent_regex(al.sym))
                                *parent = al.sym;
                        if (!symbol_conf.use_callchain)
                                break;
                }

                err = callchain_cursor_append(&self->callchain_cursor,
                                              ip, al.map, al.sym);
                if (err)
                        return err;
        }

        return 0;
}
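
/*
 * Default handlers; perf_event_ops__fill_defaults() installs these
 * stubs for any callback a tool leaves NULL (lost events default to
 * perf_event__process_lost instead).
 */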
static int process_event_synth_stub(union perf_event *event __used,
                                    struct perf_session *session __used)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_stub(union perf_event *event __used,
                              struct perf_sample *sample __used,
                              struct perf_session *session __used)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_finished_round_stub(union perf_event *event __used,
                                       struct perf_session *session __used,
                                       struct perf_event_ops *ops __used)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_finished_round(union perf_event *event,
                                  struct perf_session *session,
                                  struct perf_event_ops *ops);
static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
{
        if (handler->sample == NULL)
                handler->sample = process_event_stub;
        if (handler->mmap == NULL)
                handler->mmap = process_event_stub;
        if (handler->comm == NULL)
                handler->comm = process_event_stub;
        if (handler->fork == NULL)
                handler->fork = process_event_stub;
        if (handler->exit == NULL)
                handler->exit = process_event_stub;
        if (handler->lost == NULL)
                handler->lost = perf_event__process_lost;
        if (handler->read == NULL)
                handler->read = process_event_stub;
        if (handler->throttle == NULL)
                handler->throttle = process_event_stub;
        if (handler->unthrottle == NULL)
                handler->unthrottle = process_event_stub;
        if (handler->attr == NULL)
                handler->attr = process_event_synth_stub;
        if (handler->event_type == NULL)
                handler->event_type = process_event_synth_stub;
        if (handler->tracing_data == NULL)
                handler->tracing_data = process_event_synth_stub;
        if (handler->build_id == NULL)
                handler->build_id = process_event_synth_stub;
        if (handler->finished_round == NULL) {
                if (handler->ordered_samples)
                        handler->finished_round = process_finished_round;
                else
                        handler->finished_round = process_finished_round_stub;
        }
}
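
/*
 * Byte-swapping helpers, used when a perf.data file was recorded on a
 * host of the opposite endianness.  mem_bswap_64() assumes byte_size
 * is a multiple of sizeof(u64).
 */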
void mem_bswap_64(void *src, int byte_size)
{
        u64 *m = src;

        while (byte_size > 0) {
                *m = bswap_64(*m);
                byte_size -= sizeof(u64);
                ++m;
        }
}

static void perf_event__all64_swap(union perf_event *event)
{
        struct perf_event_header *hdr = &event->header;

        mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}
static void perf_event__comm_swap(union perf_event *event)
{
        event->comm.pid = bswap_32(event->comm.pid);
        event->comm.tid = bswap_32(event->comm.tid);
}

static void perf_event__mmap_swap(union perf_event *event)
{
        event->mmap.pid = bswap_32(event->mmap.pid);
        event->mmap.tid = bswap_32(event->mmap.tid);
        event->mmap.start = bswap_64(event->mmap.start);
        event->mmap.len = bswap_64(event->mmap.len);
        event->mmap.pgoff = bswap_64(event->mmap.pgoff);
}

static void perf_event__task_swap(union perf_event *event)
{
        event->fork.pid = bswap_32(event->fork.pid);
        event->fork.tid = bswap_32(event->fork.tid);
        event->fork.ppid = bswap_32(event->fork.ppid);
        event->fork.ptid = bswap_32(event->fork.ptid);
        event->fork.time = bswap_64(event->fork.time);
}

static void perf_event__read_swap(union perf_event *event)
{
        event->read.pid = bswap_32(event->read.pid);
        event->read.tid = bswap_32(event->read.tid);
        event->read.value = bswap_64(event->read.value);
        event->read.time_enabled = bswap_64(event->read.time_enabled);
        event->read.time_running = bswap_64(event->read.time_running);
        event->read.id = bswap_64(event->read.id);
}

static void perf_event__attr_swap(union perf_event *event)
{
        size_t size;

        event->attr.attr.type = bswap_32(event->attr.attr.type);
        event->attr.attr.size = bswap_32(event->attr.attr.size);
        event->attr.attr.config = bswap_64(event->attr.attr.config);
        event->attr.attr.sample_period = bswap_64(event->attr.attr.sample_period);
        event->attr.attr.sample_type = bswap_64(event->attr.attr.sample_type);
        event->attr.attr.read_format = bswap_64(event->attr.attr.read_format);
        event->attr.attr.wakeup_events = bswap_32(event->attr.attr.wakeup_events);
        event->attr.attr.bp_type = bswap_32(event->attr.attr.bp_type);
        event->attr.attr.bp_addr = bswap_64(event->attr.attr.bp_addr);
        event->attr.attr.bp_len = bswap_64(event->attr.attr.bp_len);

        size = event->header.size;
        size -= (void *)&event->attr.id - (void *)event;
        mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_type_swap(union perf_event *event)
{
        event->event_type.event_type.event_id =
                bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event)
{
        event->tracing_data.size = bswap_32(event->tracing_data.size);
}

typedef void (*perf_event__swap_op)(union perf_event *event);

static perf_event__swap_op perf_event__swap_ops[] = {
        [PERF_RECORD_MMAP]                = perf_event__mmap_swap,
        [PERF_RECORD_COMM]                = perf_event__comm_swap,
        [PERF_RECORD_FORK]                = perf_event__task_swap,
        [PERF_RECORD_EXIT]                = perf_event__task_swap,
        [PERF_RECORD_LOST]                = perf_event__all64_swap,
        [PERF_RECORD_READ]                = perf_event__read_swap,
        [PERF_RECORD_SAMPLE]              = perf_event__all64_swap,
        [PERF_RECORD_HEADER_ATTR]         = perf_event__attr_swap,
        [PERF_RECORD_HEADER_EVENT_TYPE]   = perf_event__event_type_swap,
        [PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
        [PERF_RECORD_HEADER_BUILD_ID]     = NULL,
        [PERF_RECORD_HEADER_MAX]          = NULL,
};
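
/*
 * Ordered-samples machinery: events are queued by timestamp and only
 * flushed once a finished-round event guarantees that nothing older
 * can still show up (see the comment above process_finished_round()).
 */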
struct sample_queue {
        u64                     timestamp;
        u64                     file_offset;
        union perf_event        *event;
        struct list_head        list;
};

static void perf_session_free_sample_buffers(struct perf_session *session)
{
        struct ordered_samples *os = &session->ordered_samples;

        while (!list_empty(&os->to_free)) {
                struct sample_queue *sq;

                sq = list_entry(os->to_free.next, struct sample_queue, list);
                list_del(&sq->list);
                free(sq);
        }
}
static int perf_session_deliver_event(struct perf_session *session,
                                      union perf_event *event,
                                      struct perf_sample *sample,
                                      struct perf_event_ops *ops,
                                      u64 file_offset);

static void flush_sample_queue(struct perf_session *s,
                               struct perf_event_ops *ops)
{
        struct ordered_samples *os = &s->ordered_samples;
        struct list_head *head = &os->samples;
        struct sample_queue *tmp, *iter;
        struct perf_sample sample;
        u64 limit = os->next_flush;
        u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL;

        if (!ops->ordered_samples || !limit)
                return;

        list_for_each_entry_safe(iter, tmp, head, list) {
                if (iter->timestamp > limit)
                        break;

                perf_session__parse_sample(s, iter->event, &sample);
                perf_session_deliver_event(s, iter->event, &sample, ops,
                                           iter->file_offset);

                os->last_flush = iter->timestamp;
                list_del(&iter->list);
                list_add(&iter->list, &os->sample_cache);
        }

        if (list_empty(head)) {
                os->last_sample = NULL;
        } else if (last_ts <= limit) {
                os->last_sample =
                        list_entry(head->prev, struct sample_queue, list);
        }
}
/*
 * When perf record finishes a pass over every buffer, it records this
 * pseudo event.
 * We record the max timestamp t found in pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and then read in pass n + 1.
 * Hence when we start to read pass n + 2, we can safely flush every
 * event with a timestamp below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7  <--- max recorded
 *
 *      Flush every event below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush every event below timestamp 7
 *      etc...
 */
static int process_finished_round(union perf_event *event __used,
                                  struct perf_session *session,
                                  struct perf_event_ops *ops)
{
        flush_sample_queue(session, ops);
        session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;

        return 0;
}
/* The queue is ordered by time */
static void __queue_event(struct sample_queue *new, struct perf_session *s)
{
        struct ordered_samples *os = &s->ordered_samples;
        struct sample_queue *sample = os->last_sample;
        u64 timestamp = new->timestamp;
        struct list_head *p;

        os->last_sample = new;

        if (!sample) {
                list_add(&new->list, &os->samples);
                os->max_timestamp = timestamp;
                return;
        }

        /*
         * last_sample might point to some random place in the list as it's
         * the last queued event. We expect that the new event is close to
         * this.
         */
        if (sample->timestamp <= timestamp) {
                while (sample->timestamp <= timestamp) {
                        p = sample->list.next;
                        if (p == &os->samples) {
                                list_add_tail(&new->list, &os->samples);
                                os->max_timestamp = timestamp;
                                return;
                        }
                        sample = list_entry(p, struct sample_queue, list);
                }
                list_add_tail(&new->list, &sample->list);
        } else {
                while (sample->timestamp > timestamp) {
                        p = sample->list.prev;
                        if (p == &os->samples) {
                                list_add(&new->list, &os->samples);
                                return;
                        }
                        sample = list_entry(p, struct sample_queue, list);
                }
                list_add(&new->list, &sample->list);
        }
}

#define MAX_SAMPLE_BUFFER       (64 * 1024 / sizeof(struct sample_queue))
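
/*
 * Queue one event for ordered delivery.  Returns -ETIME when the event
 * carries no usable timestamp, in which case the caller falls back to
 * delivering it immediately.
 */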
static int perf_session_queue_event(struct perf_session *s, union perf_event *event,
                                    struct perf_sample *sample, u64 file_offset)
{
        struct ordered_samples *os = &s->ordered_samples;
        struct list_head *sc = &os->sample_cache;
        u64 timestamp = sample->time;
        struct sample_queue *new;

        if (!timestamp || timestamp == ~0ULL)
                return -ETIME;

        if (timestamp < s->ordered_samples.last_flush) {
                printf("Warning: Timestamp below last timeslice flush\n");
                return -EINVAL;
        }

        if (!list_empty(sc)) {
                new = list_entry(sc->next, struct sample_queue, list);
                list_del(&new->list);
        } else if (os->sample_buffer) {
                new = os->sample_buffer + os->sample_buffer_idx;
                if (++os->sample_buffer_idx == MAX_SAMPLE_BUFFER)
                        os->sample_buffer = NULL;
        } else {
                os->sample_buffer = malloc(MAX_SAMPLE_BUFFER * sizeof(*new));
                if (!os->sample_buffer)
                        return -ENOMEM;
                list_add(&os->sample_buffer->list, &os->to_free);
                /* First entry is abused to maintain the to_free list. */
                os->sample_buffer_idx = 2;
                new = os->sample_buffer + 1;
        }

        new->timestamp = timestamp;
        new->file_offset = file_offset;
        new->event = event;

        __queue_event(new, s);

        return 0;
}
static void callchain__printf(struct perf_sample *sample)
{
        unsigned int i;

        printf("... chain: nr:%" PRIu64 "\n", sample->callchain->nr);

        for (i = 0; i < sample->callchain->nr; i++)
                printf("..... %2d: %016" PRIx64 "\n",
                       i, sample->callchain->ips[i]);
}

static void perf_session__print_tstamp(struct perf_session *session,
                                       union perf_event *event,
                                       struct perf_sample *sample)
{
        if (event->header.type != PERF_RECORD_SAMPLE &&
            !session->sample_id_all) {
                fputs("-1 -1 ", stdout);
                return;
        }

        if ((session->sample_type & PERF_SAMPLE_CPU))
                printf("%u ", sample->cpu);

        if (session->sample_type & PERF_SAMPLE_TIME)
                printf("%" PRIu64 " ", sample->time);
}
static void dump_event(struct perf_session *session, union perf_event *event,
                       u64 file_offset, struct perf_sample *sample)
{
        if (!dump_trace)
                return;

        printf("\n%#" PRIx64 " [%#x]: event: %d\n",
               file_offset, event->header.size, event->header.type);

        trace_event(event);

        if (sample)
                perf_session__print_tstamp(session, event, sample);

        printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
               event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct perf_session *session, union perf_event *event,
                        struct perf_sample *sample)
{
        if (!dump_trace)
                return;

        printf("(IP, %d): %d/%d: %#" PRIx64 " period: %" PRIu64 "\n",
               event->header.misc, sample->pid, sample->tid, sample->ip,
               sample->period);

        if (session->sample_type & PERF_SAMPLE_CALLCHAIN)
                callchain__printf(sample);
}
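
/*
 * Deliver one kernel event to the matching tool callback.  Unknown
 * types are counted and rejected.
 */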
static int perf_session_deliver_event(struct perf_session *session,
                                      union perf_event *event,
                                      struct perf_sample *sample,
                                      struct perf_event_ops *ops,
                                      u64 file_offset)
{
        dump_event(session, event, file_offset, sample);

        switch (event->header.type) {
        case PERF_RECORD_SAMPLE:
                dump_sample(session, event, sample);
                return ops->sample(event, sample, session);
        case PERF_RECORD_MMAP:
                return ops->mmap(event, sample, session);
        case PERF_RECORD_COMM:
                return ops->comm(event, sample, session);
        case PERF_RECORD_FORK:
                return ops->fork(event, sample, session);
        case PERF_RECORD_EXIT:
                return ops->exit(event, sample, session);
        case PERF_RECORD_LOST:
                return ops->lost(event, sample, session);
        case PERF_RECORD_READ:
                return ops->read(event, sample, session);
        case PERF_RECORD_THROTTLE:
                return ops->throttle(event, sample, session);
        case PERF_RECORD_UNTHROTTLE:
                return ops->unthrottle(event, sample, session);
        default:
                ++session->hists.stats.nr_unknown_events;
                return -1;
        }
}
static int perf_session__preprocess_sample(struct perf_session *session,
                                           union perf_event *event,
                                           struct perf_sample *sample)
{
        if (event->header.type != PERF_RECORD_SAMPLE ||
            !(session->sample_type & PERF_SAMPLE_CALLCHAIN))
                return 0;

        if (!ip_callchain__valid(sample->callchain, event)) {
                pr_debug("call-chain problem with event, skipping it.\n");
                ++session->hists.stats.nr_invalid_chains;
                session->hists.stats.total_invalid_chains += sample->period;
                return -EINVAL;
        }
        return 0;
}
static int perf_session__process_user_event(struct perf_session *session,
                                            union perf_event *event,
                                            struct perf_event_ops *ops,
                                            u64 file_offset)
{
        dump_event(session, event, file_offset, NULL);

        /* These events are processed right away */
        switch (event->header.type) {
        case PERF_RECORD_HEADER_ATTR:
                return ops->attr(event, session);
        case PERF_RECORD_HEADER_EVENT_TYPE:
                return ops->event_type(event, session);
        case PERF_RECORD_HEADER_TRACING_DATA:
                /* setup for reading amidst mmap */
                lseek(session->fd, file_offset, SEEK_SET);
                return ops->tracing_data(event, session);
        case PERF_RECORD_HEADER_BUILD_ID:
                return ops->build_id(event, session);
        case PERF_RECORD_FINISHED_ROUND:
                return ops->finished_round(event, session, ops);
        default:
                return -EINVAL;
        }
}
static int perf_session__process_event(struct perf_session *session,
                                       union perf_event *event,
                                       struct perf_event_ops *ops,
                                       u64 file_offset)
{
        struct perf_sample sample;
        int ret;

        if (session->header.needs_swap &&
            perf_event__swap_ops[event->header.type])
                perf_event__swap_ops[event->header.type](event);

        if (event->header.type >= PERF_RECORD_HEADER_MAX)
                return -EINVAL;

        hists__inc_nr_events(&session->hists, event->header.type);

        if (event->header.type >= PERF_RECORD_USER_TYPE_START)
                return perf_session__process_user_event(session, event, ops, file_offset);

        /*
         * For all kernel events we get the sample data
         */
        perf_session__parse_sample(session, event, &sample);

        /* Preprocess sample records - precheck callchains */
        if (perf_session__preprocess_sample(session, event, &sample))
                return 0;

        if (ops->ordered_samples) {
                ret = perf_session_queue_event(session, event, &sample,
                                               file_offset);
                if (ret != -ETIME)
                        return ret;
        }

        return perf_session_deliver_event(session, event, &sample, ops,
                                          file_offset);
}
void perf_event_header__bswap(struct perf_event_header *self)
{
        self->type = bswap_32(self->type);
        self->misc = bswap_16(self->misc);
        self->size = bswap_16(self->size);
}

static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
        struct thread *thread = perf_session__findnew(self, 0);

        if (thread == NULL || thread__set_comm(thread, "swapper")) {
                pr_err("problem inserting idle task.\n");
                thread = NULL;
        }

        return thread;
}
static void perf_session__warn_about_errors(const struct perf_session *session,
                                            const struct perf_event_ops *ops)
{
        if (ops->lost == perf_event__process_lost &&
            session->hists.stats.total_lost != 0) {
                ui__warning("Processed %" PRIu64 " events and LOST %" PRIu64
                            "!\n\nCheck IO/CPU overload!\n\n",
                            session->hists.stats.total_period,
                            session->hists.stats.total_lost);
        }

        if (session->hists.stats.nr_unknown_events != 0) {
                ui__warning("Found %u unknown events!\n\n"
                            "Is this an older tool processing a perf.data "
                            "file generated by a more recent tool?\n\n"
                            "If that is not the case, consider "
                            "reporting to linux-kernel@vger.kernel.org.\n\n",
                            session->hists.stats.nr_unknown_events);
        }

        if (session->hists.stats.nr_invalid_chains != 0) {
                ui__warning("Found invalid callchains!\n\n"
                            "%u out of %u events were discarded for this reason.\n\n"
                            "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
                            session->hists.stats.nr_invalid_chains,
                            session->hists.stats.nr_events[PERF_RECORD_SAMPLE]);
        }
}
#define session_done()  (*(volatile int *)(&session_done))

volatile int session_done;
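
/*
 * Pipe mode: no mmap window is possible, so read one event header at a
 * time, then its payload, and process events as they arrive.
 */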
static int __perf_session__process_pipe_events(struct perf_session *self,
                                               struct perf_event_ops *ops)
{
        union perf_event event;
        uint32_t size;
        int skip = 0;
        u64 head;
        int err;
        void *p;

        perf_event_ops__fill_defaults(ops);

        head = 0;
more:
        err = readn(self->fd, &event, sizeof(struct perf_event_header));
        if (err <= 0) {
                if (err == 0)
                        goto done;

                pr_err("failed to read event header\n");
                goto out_err;
        }

        if (self->header.needs_swap)
                perf_event_header__bswap(&event.header);

        size = event.header.size;
        if (size == 0)
                size = 8;

        p = &event;
        p += sizeof(struct perf_event_header);

        if (size - sizeof(struct perf_event_header)) {
                err = readn(self->fd, p, size - sizeof(struct perf_event_header));
                if (err <= 0) {
                        if (err == 0) {
                                pr_err("unexpected end of event stream\n");
                                goto done;
                        }

                        pr_err("failed to read event data\n");
                        goto out_err;
                }
        }

        if (size == 0 ||
            (skip = perf_session__process_event(self, &event, ops, head)) < 0) {
                dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n",
                            head, event.header.size, event.header.type);
                /*
                 * assume we lost track of the stream, check alignment, and
                 * advance by a single u64 in the hope of catching on again
                 * 'soon'.
                 */
                if (unlikely(head & 7))
                        head &= ~7ULL;

                size = 8;
        }

        head += size;

        if (skip > 0)
                head += skip;

        if (!session_done())
                goto more;
done:
        err = 0;
out_err:
        perf_session__warn_about_errors(self, ops);
        perf_session_free_sample_buffers(self);
        return err;
}
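
/*
 * File mode: map the data section in mmap_window-sized slices (the
 * whole file at once on 64-bit) and walk the events in place,
 * remapping whenever an event crosses the end of the current slice.
 */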
int __perf_session__process_events(struct perf_session *session,
                                   u64 data_offset, u64 data_size,
                                   u64 file_size, struct perf_event_ops *ops)
{
        u64 head, page_offset, file_offset, file_pos, progress_next;
        int err, mmap_prot, mmap_flags, map_idx = 0;
        struct ui_progress *progress;
        size_t page_size, mmap_size;
        char *buf, *mmaps[8];
        union perf_event *event;
        uint32_t size;

        perf_event_ops__fill_defaults(ops);

        page_size = sysconf(_SC_PAGESIZE);

        page_offset = page_size * (data_offset / page_size);
        file_offset = page_offset;
        head = data_offset - page_offset;

        if (data_offset + data_size < file_size)
                file_size = data_offset + data_size;

        progress_next = file_size / 16;
        progress = ui_progress__new("Processing events...", file_size);
        if (progress == NULL)
                return -1;

        mmap_size = session->mmap_window;
        if (mmap_size > file_size)
                mmap_size = file_size;

        memset(mmaps, 0, sizeof(mmaps));

        mmap_prot  = PROT_READ;
        mmap_flags = MAP_SHARED;

        if (session->header.needs_swap) {
                mmap_prot  |= PROT_WRITE;
                mmap_flags = MAP_PRIVATE;
        }
remap:
        buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, session->fd,
                   file_offset);
        if (buf == MAP_FAILED) {
                pr_err("failed to mmap file\n");
                err = -errno;
                goto out_err;
        }
        mmaps[map_idx] = buf;
        map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
        file_pos = file_offset + head;
more:
        event = (union perf_event *)(buf + head);

        if (session->header.needs_swap)
                perf_event_header__bswap(&event->header);

        if (head + event->header.size > mmap_size) {
                if (mmaps[map_idx]) {
                        munmap(mmaps[map_idx], mmap_size);
                        mmaps[map_idx] = NULL;
                }

                page_offset = page_size * (head / page_size);
                file_offset += page_offset;
                head -= page_offset;
                goto remap;
        }

        size = event->header.size;

        if (size == 0 ||
            perf_session__process_event(session, event, ops, file_pos) < 0) {
                dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n",
                            file_offset + head, event->header.size,
                            event->header.type);
                /*
                 * assume we lost track of the stream, check alignment, and
                 * advance by a single u64 in the hope of catching on again
                 * 'soon'.
                 */
                if (unlikely(head & 7))
                        head &= ~7ULL;

                size = 8;
        }

        head += size;
        file_pos += size;

        if (file_pos >= progress_next) {
                progress_next += file_size / 16;
                ui_progress__update(progress, file_pos);
        }

        if (file_pos < file_size)
                goto more;

        err = 0;
        /* do the final flush for ordered samples */
        session->ordered_samples.next_flush = ULLONG_MAX;
        flush_sample_queue(session, ops);
out_err:
        ui_progress__delete(progress);
        perf_session__warn_about_errors(session, ops);
        perf_session_free_sample_buffers(session);
        return err;
}
int perf_session__process_events(struct perf_session *self,
                                 struct perf_event_ops *ops)
{
        int err;

        if (perf_session__register_idle_thread(self) == NULL)
                return -ENOMEM;

        if (!self->fd_pipe)
                err = __perf_session__process_events(self,
                                                     self->header.data_offset,
                                                     self->header.data_size,
                                                     self->size, ops);
        else
                err = __perf_session__process_pipe_events(self, ops);

        return err;
}
bool perf_session__has_traces(struct perf_session *self, const char *msg)
{
        if (!(self->sample_type & PERF_SAMPLE_RAW)) {
                pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
                return false;
        }

        return true;
}
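
/*
 * Install a kallsyms reference relocation symbol (e.g. "_text") on
 * every map type; symbol resolution can later use it to detect a
 * relocated kernel.
 */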
int perf_session__set_kallsyms_ref_reloc_sym(struct map **maps,
                                             const char *symbol_name,
                                             u64 addr)
{
        char *bracket;
        enum map_type i;
        struct ref_reloc_sym *ref;

        ref = zalloc(sizeof(struct ref_reloc_sym));
        if (ref == NULL)
                return -ENOMEM;

        ref->name = strdup(symbol_name);
        if (ref->name == NULL) {
                free(ref);
                return -ENOMEM;
        }

        bracket = strchr(ref->name, ']');
        if (bracket)
                *bracket = '\0';

        ref->addr = addr;

        for (i = 0; i < MAP__NR_TYPES; ++i) {
                struct kmap *kmap = map__kmap(maps[i]);
                kmap->ref_reloc_sym = ref;
        }

        return 0;
}
size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
{
        return __dsos__fprintf(&self->host_machine.kernel_dsos, fp) +
               __dsos__fprintf(&self->host_machine.user_dsos, fp) +
               machines__fprintf_dsos(&self->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
                                          bool with_hits)
{
        size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, with_hits);

        return ret + machines__fprintf_dsos_buildid(&self->machines, fp, with_hits);
}

size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
{
        struct perf_evsel *pos;
        size_t ret = fprintf(fp, "Aggregated stats:\n");

        ret += hists__fprintf_nr_events(&session->hists, fp);

        list_for_each_entry(pos, &session->evlist->entries, node) {
                ret += fprintf(fp, "%s stats:\n", event_name(pos));
                ret += hists__fprintf_nr_events(&pos->hists, fp);
        }

        return ret;
}
void perf_session__print_symbols(union perf_event *event,
                                 struct perf_sample *sample,
                                 struct perf_session *session)
{
        struct addr_location al;
        const char *symname, *dsoname;
        struct callchain_cursor *cursor = &session->callchain_cursor;
        struct callchain_cursor_node *node;

        if (perf_event__preprocess_sample(event, session, &al, sample,
                                          NULL) < 0) {
                error("problem processing %d event, skipping it.\n",
                      event->header.type);
                return;
        }

        if (symbol_conf.use_callchain && sample->callchain) {

                if (perf_session__resolve_callchain(session, al.thread,
                                                    sample->callchain, NULL) != 0) {
                        if (verbose)
                                error("Failed to resolve callchain. Skipping\n");
                        return;
                }
                callchain_cursor_commit(cursor);

                while (1) {
                        node = callchain_cursor_current(cursor);
                        if (!node)
                                break;

                        if (node->sym && node->sym->name)
                                symname = node->sym->name;
                        else
                                symname = "";

                        if (node->map && node->map->dso && node->map->dso->name)
                                dsoname = node->map->dso->name;
                        else
                                dsoname = "";

                        printf("\t%16" PRIx64 " %s (%s)\n", node->ip, symname, dsoname);

                        callchain_cursor_advance(cursor);
                }

        } else {
                if (al.sym && al.sym->name)
                        symname = al.sym->name;
                else
                        symname = "";

                if (al.map && al.map->dso && al.map->dso->name)
                        dsoname = al.map->dso->name;
                else
                        dsoname = "";

                printf("%16" PRIx64 " %s (%s)", al.addr, symname, dsoname);
        }
}