session.c

#define _FILE_OFFSET_BITS 64

#include <linux/kernel.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "session.h"
#include "sort.h"
#include "util.h"
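
/*
 * Open the session's input: stdin when the filename is "-" (pipe mode),
 * otherwise the data file itself, after checking ownership, size and
 * header compatibility.
 */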
static int perf_session__open(struct perf_session *self, bool force)
{
	struct stat input_stat;

	if (!strcmp(self->filename, "-")) {
		self->fd_pipe = true;
		self->fd = STDIN_FILENO;

		if (perf_header__read(self, self->fd) < 0)
			pr_err("incompatible file format\n");

		return 0;
	}

	self->fd = open(self->filename, O_RDONLY);
	if (self->fd < 0) {
		pr_err("failed to open file: %s", self->filename);
		if (!strcmp(self->filename, "perf.data"))
			pr_err(" (try 'perf record' first)");
		pr_err("\n");
		return -errno;
	}

	if (fstat(self->fd, &input_stat) < 0)
		goto out_close;

	if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
		pr_err("file %s not owned by current user or root\n",
		       self->filename);
		goto out_close;
	}

	if (!input_stat.st_size) {
		pr_info("zero-sized file (%s), nothing to do!\n",
			self->filename);
		goto out_close;
	}

	if (perf_header__read(self, self->fd) < 0) {
		pr_err("incompatible file format\n");
		goto out_close;
	}

	self->size = input_stat.st_size;
	return 0;

out_close:
	close(self->fd);
	self->fd = -1;
	return -1;
}

void perf_session__update_sample_type(struct perf_session *self)
{
	self->sample_type = perf_header__sample_type(&self->header);
}

int perf_session__create_kernel_maps(struct perf_session *self)
{
	int ret = machine__create_kernel_maps(&self->host_machine);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&self->machines);
	return ret;
}
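
/*
 * Allocate and initialize a session. @filename is copied into the
 * allocation made right past the struct (hence the sizeof(*self) + len),
 * so struct and name are freed together. In O_RDONLY mode the input is
 * opened and its header read; in O_WRONLY mode the kernel maps are
 * created up front instead.
 */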
struct perf_session *perf_session__new(const char *filename, int mode, bool force, bool repipe)
{
	size_t len = filename ? strlen(filename) + 1 : 0;
	struct perf_session *self = zalloc(sizeof(*self) + len);

	if (self == NULL)
		goto out;

	if (perf_header__init(&self->header) < 0)
		goto out_free;

	memcpy(self->filename, filename, len);
	self->threads = RB_ROOT;
	INIT_LIST_HEAD(&self->dead_threads);
	self->hists_tree = RB_ROOT;
	self->last_match = NULL;
	self->mmap_window = 32;
	self->cwd = NULL;
	self->cwdlen = 0;
	self->machines = RB_ROOT;
	self->repipe = repipe;
	INIT_LIST_HEAD(&self->ordered_samples.samples_head);
	machine__init(&self->host_machine, "", HOST_KERNEL_ID);

	if (mode == O_RDONLY) {
		if (perf_session__open(self, force) < 0)
			goto out_delete;
	} else if (mode == O_WRONLY) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(self) < 0)
			goto out_delete;
	}

	perf_session__update_sample_type(self);
out:
	return self;
out_free:
	free(self);
	return NULL;
out_delete:
	perf_session__delete(self);
	return NULL;
}
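
/*
 * A minimal sketch of the intended lifecycle (my_event_ops is a
 * hypothetical tool-supplied perf_event_ops; error handling elided):
 *
 *	struct perf_session *s;
 *
 *	s = perf_session__new("perf.data", O_RDONLY, false, false);
 *	if (s != NULL) {
 *		perf_session__process_events(s, &my_event_ops);
 *		perf_session__delete(s);
 *	}
 */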
void perf_session__delete(struct perf_session *self)
{
	perf_header__exit(&self->header);
	close(self->fd);
	free(self->cwd);
	free(self);
}

void perf_session__remove_thread(struct perf_session *self, struct thread *th)
{
	rb_erase(&th->rb_node, &self->threads);
	/*
	 * We may have references to this thread, for instance in some
	 * hist_entry instances, so just move them to a separate list.
	 */
	list_add_tail(&th->node, &self->dead_threads);
}

static bool symbol__match_parent_regex(struct symbol *sym)
{
	if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
		return true;

	return false;
}
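
/*
 * Walk a raw callchain and resolve each IP to a map/symbol pair. The
 * special PERF_CONTEXT_* markers embedded in the chain do not name
 * addresses; they switch the cpumode used to resolve the entries that
 * follow them. Returns a calloc()'ed array of chain->nr map_symbols
 * that the caller owns.
 */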
struct map_symbol *perf_session__resolve_callchain(struct perf_session *self,
						   struct thread *thread,
						   struct ip_callchain *chain,
						   struct symbol **parent)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	unsigned int i;
	struct map_symbol *syms = calloc(chain->nr, sizeof(*syms));

	if (!syms)
		return NULL;

	for (i = 0; i < chain->nr; i++) {
		u64 ip = chain->ips[i];
		struct addr_location al;

		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR;
				break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL;
				break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER;
				break;
			default:
				break;
			}
			continue;
		}

		al.filtered = false;
		thread__find_addr_location(thread, self, cpumode,
					   MAP__FUNCTION, thread->pid, ip, &al, NULL);
		if (al.sym != NULL) {
			if (sort__has_parent && !*parent &&
			    symbol__match_parent_regex(al.sym))
				*parent = al.sym;
			if (!symbol_conf.use_callchain)
				break;
			syms[i].map = al.map;
			syms[i].sym = al.sym;
		}
	}

	return syms;
}

static int process_event_stub(event_t *event __used,
			      struct perf_session *session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(event_t *event __used,
				       struct perf_session *session __used,
				       struct perf_event_ops *ops __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(event_t *event,
				  struct perf_session *session,
				  struct perf_event_ops *ops);
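
/*
 * Fill every handler the tool left NULL with a stub, so that
 * perf_session__process_event() can call through unconditionally.
 * finished_round defaults to the real reordering flush only when the
 * tool asked for ordered samples.
 */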
static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
{
	if (handler->sample == NULL)
		handler->sample = process_event_stub;
	if (handler->mmap == NULL)
		handler->mmap = process_event_stub;
	if (handler->comm == NULL)
		handler->comm = process_event_stub;
	if (handler->fork == NULL)
		handler->fork = process_event_stub;
	if (handler->exit == NULL)
		handler->exit = process_event_stub;
	if (handler->lost == NULL)
		handler->lost = process_event_stub;
	if (handler->read == NULL)
		handler->read = process_event_stub;
	if (handler->throttle == NULL)
		handler->throttle = process_event_stub;
	if (handler->unthrottle == NULL)
		handler->unthrottle = process_event_stub;
	if (handler->attr == NULL)
		handler->attr = process_event_stub;
	if (handler->event_type == NULL)
		handler->event_type = process_event_stub;
	if (handler->tracing_data == NULL)
		handler->tracing_data = process_event_stub;
	if (handler->build_id == NULL)
		handler->build_id = process_event_stub;
	if (handler->finished_round == NULL) {
		if (handler->ordered_samples)
			handler->finished_round = process_finished_round;
		else
			handler->finished_round = process_finished_round_stub;
	}
}
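
/*
 * Byte-swapping helpers, used when the data file was recorded on a host
 * of the opposite endianness (header.needs_swap). mem_bswap_64() swaps
 * in whole u64 units and so assumes byte_size is a multiple of
 * sizeof(u64).
 */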
void mem_bswap_64(void *src, int byte_size)
{
	u64 *m = src;

	while (byte_size > 0) {
		*m = bswap_64(*m);
		byte_size -= sizeof(u64);
		++m;
	}
}

static void event__all64_swap(event_t *self)
{
	struct perf_event_header *hdr = &self->header;

	mem_bswap_64(hdr + 1, self->header.size - sizeof(*hdr));
}

static void event__comm_swap(event_t *self)
{
	self->comm.pid = bswap_32(self->comm.pid);
	self->comm.tid = bswap_32(self->comm.tid);
}

static void event__mmap_swap(event_t *self)
{
	self->mmap.pid = bswap_32(self->mmap.pid);
	self->mmap.tid = bswap_32(self->mmap.tid);
	self->mmap.start = bswap_64(self->mmap.start);
	self->mmap.len = bswap_64(self->mmap.len);
	self->mmap.pgoff = bswap_64(self->mmap.pgoff);
}

static void event__task_swap(event_t *self)
{
	self->fork.pid = bswap_32(self->fork.pid);
	self->fork.tid = bswap_32(self->fork.tid);
	self->fork.ppid = bswap_32(self->fork.ppid);
	self->fork.ptid = bswap_32(self->fork.ptid);
	self->fork.time = bswap_64(self->fork.time);
}

static void event__read_swap(event_t *self)
{
	self->read.pid = bswap_32(self->read.pid);
	self->read.tid = bswap_32(self->read.tid);
	self->read.value = bswap_64(self->read.value);
	self->read.time_enabled = bswap_64(self->read.time_enabled);
	self->read.time_running = bswap_64(self->read.time_running);
	self->read.id = bswap_64(self->read.id);
}

static void event__attr_swap(event_t *self)
{
	size_t size;

	self->attr.attr.type = bswap_32(self->attr.attr.type);
	self->attr.attr.size = bswap_32(self->attr.attr.size);
	self->attr.attr.config = bswap_64(self->attr.attr.config);
	self->attr.attr.sample_period = bswap_64(self->attr.attr.sample_period);
	self->attr.attr.sample_type = bswap_64(self->attr.attr.sample_type);
	self->attr.attr.read_format = bswap_64(self->attr.attr.read_format);
	self->attr.attr.wakeup_events = bswap_32(self->attr.attr.wakeup_events);
	self->attr.attr.bp_type = bswap_32(self->attr.attr.bp_type);
	self->attr.attr.bp_addr = bswap_64(self->attr.attr.bp_addr);
	self->attr.attr.bp_len = bswap_64(self->attr.attr.bp_len);

	size = self->header.size;
	size -= (void *)&self->attr.id - (void *)self;
	mem_bswap_64(self->attr.id, size);
}

static void event__event_type_swap(event_t *self)
{
	self->event_type.event_type.event_id =
		bswap_64(self->event_type.event_type.event_id);
}

static void event__tracing_data_swap(event_t *self)
{
	self->tracing_data.size = bswap_32(self->tracing_data.size);
}
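
/*
 * Per-record-type swap routines, indexed by perf_event_header.type.
 * A NULL slot means no payload swapping is done here for that type.
 */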
typedef void (*event__swap_op)(event_t *self);

static event__swap_op event__swap_ops[] = {
	[PERF_RECORD_MMAP] = event__mmap_swap,
	[PERF_RECORD_COMM] = event__comm_swap,
	[PERF_RECORD_FORK] = event__task_swap,
	[PERF_RECORD_EXIT] = event__task_swap,
	[PERF_RECORD_LOST] = event__all64_swap,
	[PERF_RECORD_READ] = event__read_swap,
	[PERF_RECORD_SAMPLE] = event__all64_swap,
	[PERF_RECORD_HEADER_ATTR] = event__attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE] = event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID] = NULL,
	[PERF_RECORD_HEADER_MAX] = NULL,
};
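
/*
 * Ordered-samples machinery: when a tool sets ops->ordered_samples,
 * PERF_RECORD_SAMPLEs are not handed to ops->sample() immediately but
 * copied into a time-sorted queue, to be flushed in timestamp order once
 * it is known that no earlier sample can still arrive (see the
 * PERF_RECORD_FINISHED_ROUND comment below).
 */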
struct sample_queue {
	u64			timestamp;
	struct sample_event	*event;
	struct list_head	list;
};

static void flush_sample_queue(struct perf_session *s,
			       struct perf_event_ops *ops)
{
	struct list_head *head = &s->ordered_samples.samples_head;
	u64 limit = s->ordered_samples.next_flush;
	struct sample_queue *tmp, *iter;

	if (!ops->ordered_samples || !limit)
		return;

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (iter->timestamp > limit)
			return;

		if (iter == s->ordered_samples.last_inserted)
			s->ordered_samples.last_inserted = NULL;

		ops->sample((event_t *)iter->event, s);

		s->ordered_samples.last_flush = iter->timestamp;
		list_del(&iter->list);
		free(iter->event);
		free(iter);
	}
}

/*
 * When perf record finishes a pass on every buffer, it records this pseudo
 * event.
 * We record the max timestamp t found in the pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and then read in the pass n + 1.
 * Hence when we start to read the pass n + 2, we can safely flush every
 * event with a timestamp below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7  <--- max recorded
 *
 *      Flush every event below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush every event below timestamp 7
 *      etc...
 */
static int process_finished_round(event_t *event __used,
				  struct perf_session *session,
				  struct perf_event_ops *ops)
{
	flush_sample_queue(session, ops);
	session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;

	return 0;
}
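
/*
 * The three insertion helpers below keep the queue sorted by scanning
 * linearly from a known position: from the tail of the list, or
 * backward/forward from the last inserted entry.
 */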
static void __queue_sample_end(struct sample_queue *new, struct list_head *head)
{
	struct sample_queue *iter;

	list_for_each_entry_reverse(iter, head, list) {
		if (iter->timestamp < new->timestamp) {
			list_add(&new->list, &iter->list);
			return;
		}
	}

	list_add(&new->list, head);
}

static void __queue_sample_before(struct sample_queue *new,
				  struct sample_queue *iter,
				  struct list_head *head)
{
	list_for_each_entry_continue_reverse(iter, head, list) {
		if (iter->timestamp < new->timestamp) {
			list_add(&new->list, &iter->list);
			return;
		}
	}

	list_add(&new->list, head);
}

static void __queue_sample_after(struct sample_queue *new,
				 struct sample_queue *iter,
				 struct list_head *head)
{
	list_for_each_entry_continue(iter, head, list) {
		if (iter->timestamp > new->timestamp) {
			list_add_tail(&new->list, &iter->list);
			return;
		}
	}

	list_add_tail(&new->list, head);
}

/* The queue is ordered by time */
static void __queue_sample_event(struct sample_queue *new,
				 struct perf_session *s)
{
	struct sample_queue *last_inserted = s->ordered_samples.last_inserted;
	struct list_head *head = &s->ordered_samples.samples_head;

	if (!last_inserted) {
		__queue_sample_end(new, head);
		return;
	}

	/*
	 * Most of the time the current event has a timestamp
	 * very close to the last event inserted, unless we just switched
	 * to another event buffer. Having a sorting based on a list and
	 * on the last inserted event that is close to the current one is
	 * probably more efficient than an rbtree based sorting.
	 */
	if (last_inserted->timestamp >= new->timestamp)
		__queue_sample_before(new, last_inserted, head);
	else
		__queue_sample_after(new, last_inserted, head);
}
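
/*
 * Copy @event into a freshly allocated sample_queue node and insert it
 * in timestamp order. Samples older than the last flush would be
 * delivered out of order, so they are rejected with -EINVAL.
 */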
static int queue_sample_event(event_t *event, struct sample_data *data,
			      struct perf_session *s)
{
	u64 timestamp = data->time;
	struct sample_queue *new;

	if (timestamp < s->ordered_samples.last_flush) {
		printf("Warning: Timestamp below last timeslice flush\n");
		return -EINVAL;
	}

	new = malloc(sizeof(*new));
	if (!new)
		return -ENOMEM;

	new->timestamp = timestamp;

	new->event = malloc(event->header.size);
	if (!new->event) {
		free(new);
		return -ENOMEM;
	}

	memcpy(new->event, event, event->header.size);

	__queue_sample_event(new, s);
	s->ordered_samples.last_inserted = new;

	if (new->timestamp > s->ordered_samples.max_timestamp)
		s->ordered_samples.max_timestamp = new->timestamp;

	return 0;
}

static int perf_session__process_sample(event_t *event, struct perf_session *s,
					struct perf_event_ops *ops)
{
	struct sample_data data;

	if (!ops->ordered_samples)
		return ops->sample(event, s);

	bzero(&data, sizeof(struct sample_data));
	event__parse_sample(event, s->sample_type, &data);

	return queue_sample_event(event, &data, s);
}
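
/*
 * Central dispatch: account the event, byte-swap it if the file comes
 * from a host of the opposite endianness, then route it to the matching
 * ops handler.
 */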
static int perf_session__process_event(struct perf_session *self,
				       event_t *event,
				       struct perf_event_ops *ops,
				       u64 offset, u64 head)
{
	trace_event(event);

	if (event->header.type < PERF_RECORD_HEADER_MAX) {
		dump_printf("%#Lx [%#x]: PERF_RECORD_%s",
			    offset + head, event->header.size,
			    event__name[event->header.type]);
		hists__inc_nr_events(&self->hists, event->header.type);
	}

	if (self->header.needs_swap && event__swap_ops[event->header.type])
		event__swap_ops[event->header.type](event);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		return perf_session__process_sample(event, self, ops);
	case PERF_RECORD_MMAP:
		return ops->mmap(event, self);
	case PERF_RECORD_COMM:
		return ops->comm(event, self);
	case PERF_RECORD_FORK:
		return ops->fork(event, self);
	case PERF_RECORD_EXIT:
		return ops->exit(event, self);
	case PERF_RECORD_LOST:
		return ops->lost(event, self);
	case PERF_RECORD_READ:
		return ops->read(event, self);
	case PERF_RECORD_THROTTLE:
		return ops->throttle(event, self);
	case PERF_RECORD_UNTHROTTLE:
		return ops->unthrottle(event, self);
	case PERF_RECORD_HEADER_ATTR:
		return ops->attr(event, self);
	case PERF_RECORD_HEADER_EVENT_TYPE:
		return ops->event_type(event, self);
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* setup for reading amidst mmap */
		lseek(self->fd, offset + head, SEEK_SET);
		return ops->tracing_data(event, self);
	case PERF_RECORD_HEADER_BUILD_ID:
		return ops->build_id(event, self);
	case PERF_RECORD_FINISHED_ROUND:
		return ops->finished_round(event, self, ops);
	default:
		++self->hists.stats.nr_unknown_events;
		return -1;
	}
}

void perf_event_header__bswap(struct perf_event_header *self)
{
	self->type = bswap_32(self->type);
	self->misc = bswap_16(self->misc);
	self->size = bswap_16(self->size);
}

static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
	struct thread *thread = perf_session__findnew(self, 0);

	if (thread == NULL || thread__set_comm(thread, "swapper")) {
		pr_err("problem inserting idle task.\n");
		thread = NULL;
	}

	return thread;
}
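
/*
 * Read exactly @size bytes into @buf, retrying on short reads. Returns
 * the number of bytes read, or the raw read() result (0 on EOF, negative
 * on error) if the stream ends early.
 */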
int do_read(int fd, void *buf, size_t size)
{
	void *buf_start = buf;

	while (size) {
		int ret = read(fd, buf, size);

		if (ret <= 0)
			return ret;

		size -= ret;
		buf += ret;
	}

	return buf - buf_start;
}

#define session_done()	(*(volatile int *)(&session_done))
volatile int session_done;
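
/*
 * Event loop for the pipe case ("-" as input): the stream can be neither
 * mmap()'ed nor seeked, so each record is read in two steps, the header
 * first and then the remaining payload, until EOF or session_done().
 */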
static int __perf_session__process_pipe_events(struct perf_session *self,
					       struct perf_event_ops *ops)
{
	event_t event;
	uint32_t size;
	int skip = 0;
	u64 head;
	int err;
	void *p;

	perf_event_ops__fill_defaults(ops);

	head = 0;
more:
	err = do_read(self->fd, &event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (self->header.needs_swap)
		perf_event_header__bswap(&event.header);

	size = event.header.size;
	if (size == 0)
		size = 8;

	p = &event;
	p += sizeof(struct perf_event_header);

	if (size - sizeof(struct perf_event_header)) {
		err = do_read(self->fd, p,
			      size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if (size == 0 ||
	    (skip = perf_session__process_event(self, &event, ops,
						0, head)) < 0) {
		dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
			    head, event.header.size, event.header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	dump_printf("\n%#Lx [%#x]: event: %d\n",
		    head, event.header.size, event.header.type);

	if (skip > 0)
		head += skip;

	if (!session_done())
		goto more;
done:
	err = 0;
out_err:
	return err;
}
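
/*
 * Event loop for the on-disk case: the file is mapped in a sliding
 * window of mmap_window pages. When the next event would cross the end
 * of the window, the mapping is dropped and re-established at a
 * page-aligned offset further into the file.
 */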
int __perf_session__process_events(struct perf_session *self,
				   u64 data_offset, u64 data_size,
				   u64 file_size, struct perf_event_ops *ops)
{
	int err, mmap_prot, mmap_flags;
	u64 head, shift;
	u64 offset = 0;
	size_t page_size;
	event_t *event;
	uint32_t size;
	char *buf;
	struct ui_progress *progress = ui_progress__new("Processing events...",
							self->size);
	if (progress == NULL)
		return -1;

	perf_event_ops__fill_defaults(ops);

	page_size = sysconf(_SC_PAGESIZE);

	head = data_offset;
	shift = page_size * (head / page_size);
	offset += shift;
	head -= shift;

	mmap_prot  = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (self->header.needs_swap) {
		mmap_prot  |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, page_size * self->mmap_window, mmap_prot,
		   mmap_flags, self->fd, offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}
more:
	event = (event_t *)(buf + head);
	ui_progress__update(progress, offset);

	if (self->header.needs_swap)
		perf_event_header__bswap(&event->header);

	size = event->header.size;
	if (size == 0)
		size = 8;

	if (head + event->header.size >= page_size * self->mmap_window) {
		int munmap_ret;

		shift = page_size * (head / page_size);

		munmap_ret = munmap(buf, page_size * self->mmap_window);
		assert(munmap_ret == 0);

		offset += shift;
		head -= shift;
		goto remap;
	}

	size = event->header.size;

	dump_printf("\n%#Lx [%#x]: event: %d\n",
		    offset + head, event->header.size, event->header.type);

	if (size == 0 ||
	    perf_session__process_event(self, event, ops, offset, head) < 0) {
		dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
			    offset + head, event->header.size,
			    event->header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	if (offset + head >= data_offset + data_size)
		goto done;

	if (offset + head < file_size)
		goto more;
done:
	err = 0;
	/* do the final flush for ordered samples */
	self->ordered_samples.next_flush = ULLONG_MAX;
	flush_sample_queue(self, ops);
out_err:
	ui_progress__delete(progress);

	return err;
}

int perf_session__process_events(struct perf_session *self,
				 struct perf_event_ops *ops)
{
	int err;

	if (perf_session__register_idle_thread(self) == NULL)
		return -ENOMEM;

	if (!symbol_conf.full_paths) {
		char bf[PATH_MAX];

		if (getcwd(bf, sizeof(bf)) == NULL) {
			err = -errno;
out_getcwd_err:
			pr_err("failed to get the current directory\n");
			goto out_err;
		}
		self->cwd = strdup(bf);
		if (self->cwd == NULL) {
			err = -ENOMEM;
			goto out_getcwd_err;
		}
		self->cwdlen = strlen(self->cwd);
	}

	if (!self->fd_pipe)
		err = __perf_session__process_events(self,
						     self->header.data_offset,
						     self->header.data_size,
						     self->size, ops);
	else
		err = __perf_session__process_pipe_events(self, ops);
out_err:
	return err;
}

bool perf_session__has_traces(struct perf_session *self, const char *msg)
{
	if (!(self->sample_type & PERF_SAMPLE_RAW)) {
		pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
		return false;
	}

	return true;
}
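
/*
 * Record the kernel relocation reference symbol and its address for every
 * map type, so symbol resolution can correct for a relocated kernel. The
 * name is duplicated once and shared by all maps; anything from the first
 * ']' on is trimmed from the copy.
 */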
int perf_session__set_kallsyms_ref_reloc_sym(struct map **maps,
					     const char *symbol_name,
					     u64 addr)
{
	char *bracket;
	enum map_type i;
	struct ref_reloc_sym *ref;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct kmap *kmap = map__kmap(maps[i]);
		kmap->ref_reloc_sym = ref;
	}

	return 0;
}

size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
{
	return __dsos__fprintf(&self->host_machine.kernel_dsos, fp) +
	       __dsos__fprintf(&self->host_machine.user_dsos, fp) +
	       machines__fprintf_dsos(&self->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
					  bool with_hits)
{
	size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, with_hits);

	return ret + machines__fprintf_dsos_buildid(&self->machines, fp, with_hits);
}