session.c

#define _FILE_OFFSET_BITS 64

#include <linux/kernel.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>

#include "session.h"
#include "sort.h"
#include "util.h"
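
/*
 * Open the input file (or stdin when the filename is "-"), validate
 * ownership and size, and read the perf header. Returns 0 on success,
 * a negative value on error.
 */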
static int perf_session__open(struct perf_session *self, bool force)
{
	struct stat input_stat;

	if (!strcmp(self->filename, "-")) {
		self->fd_pipe = true;
		self->fd = STDIN_FILENO;

		if (perf_header__read(self, self->fd) < 0)
			pr_err("incompatible file format");

		return 0;
	}

	self->fd = open(self->filename, O_RDONLY);
	if (self->fd < 0) {
		pr_err("failed to open file: %s", self->filename);
		if (!strcmp(self->filename, "perf.data"))
			pr_err(" (try 'perf record' first)");
		pr_err("\n");
		return -errno;
	}

	if (fstat(self->fd, &input_stat) < 0)
		goto out_close;

	if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
		pr_err("file %s not owned by current user or root\n",
		       self->filename);
		goto out_close;
	}

	if (!input_stat.st_size) {
		pr_info("zero-sized file (%s), nothing to do!\n",
			self->filename);
		goto out_close;
	}

	if (perf_header__read(self, self->fd) < 0) {
		pr_err("incompatible file format");
		goto out_close;
	}

	self->size = input_stat.st_size;
	return 0;

out_close:
	close(self->fd);
	self->fd = -1;
	return -1;
}

void perf_session__update_sample_type(struct perf_session *self)
{
	self->sample_type = perf_header__sample_type(&self->header);
}

int perf_session__create_kernel_maps(struct perf_session *self)
{
	int ret;
	struct rb_root *root = &self->kerninfo_root;

	ret = map_groups__create_kernel_maps(root, HOST_KERNEL_ID);
	if (ret >= 0)
		ret = map_groups__create_guest_kernel_maps(root);
	return ret;
}
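
/*
 * Allocate and initialize a session. In O_RDONLY mode the input file is
 * opened and its header read; in O_WRONLY mode the kernel maps are
 * created up front instead (see the comment in the function body).
 */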
struct perf_session *perf_session__new(const char *filename, int mode, bool force)
{
	size_t len = filename ? strlen(filename) + 1 : 0;
	struct perf_session *self = zalloc(sizeof(*self) + len);

	if (self == NULL)
		goto out;

	if (perf_header__init(&self->header) < 0)
		goto out_free;

	memcpy(self->filename, filename, len);
	self->threads = RB_ROOT;
	self->stats_by_id = RB_ROOT;
	self->last_match = NULL;
	self->mmap_window = 32;
	self->cwd = NULL;
	self->cwdlen = 0;
	self->unknown_events = 0;
	self->kerninfo_root = RB_ROOT;
	self->ordered_samples.flush_limit = ULLONG_MAX;
	INIT_LIST_HEAD(&self->ordered_samples.samples_head);

	if (mode == O_RDONLY) {
		if (perf_session__open(self, force) < 0)
			goto out_delete;
	} else if (mode == O_WRONLY) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(self) < 0)
			goto out_delete;
	}

	perf_session__update_sample_type(self);
out:
	return self;
out_free:
	free(self);
	return NULL;
out_delete:
	perf_session__delete(self);
	return NULL;
}

void perf_session__delete(struct perf_session *self)
{
	perf_header__exit(&self->header);
	close(self->fd);
	free(self->cwd);
	free(self);
}

static bool symbol__match_parent_regex(struct symbol *sym)
{
	if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
		return true;

	return false;
}
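
/*
 * Walk a sampled callchain and resolve each ip to a map/symbol pair.
 * PERF_CONTEXT_* markers embedded in the chain switch the cpumode used
 * for subsequent entries. Returns a calloc'ed array of chain->nr
 * entries that the caller must free, or NULL on allocation failure.
 */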
struct map_symbol *perf_session__resolve_callchain(struct perf_session *self,
						   struct thread *thread,
						   struct ip_callchain *chain,
						   struct symbol **parent)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	unsigned int i;
	struct map_symbol *syms = calloc(chain->nr, sizeof(*syms));

	if (!syms)
		return NULL;

	for (i = 0; i < chain->nr; i++) {
		u64 ip = chain->ips[i];
		struct addr_location al;

		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR;
				break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL;
				break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER;
				break;
			default:
				break;
			}
			continue;
		}

		al.filtered = false;
		thread__find_addr_location(thread, self, cpumode,
					   MAP__FUNCTION, thread->pid, ip,
					   &al, NULL);
		if (al.sym != NULL) {
			if (sort__has_parent && !*parent &&
			    symbol__match_parent_regex(al.sym))
				*parent = al.sym;
			if (!symbol_conf.use_callchain)
				break;
			syms[i].map = al.map;
			syms[i].sym = al.sym;
		}
	}

	return syms;
}

static int process_event_stub(event_t *event __used,
			      struct perf_session *session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}
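
/*
 * Point every handler the caller left NULL at a stub that just counts
 * the event as unhandled, so perf_session__process_event() can call
 * any of them unconditionally.
 */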
static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
{
	if (handler->sample == NULL)
		handler->sample = process_event_stub;
	if (handler->mmap == NULL)
		handler->mmap = process_event_stub;
	if (handler->comm == NULL)
		handler->comm = process_event_stub;
	if (handler->fork == NULL)
		handler->fork = process_event_stub;
	if (handler->exit == NULL)
		handler->exit = process_event_stub;
	if (handler->lost == NULL)
		handler->lost = process_event_stub;
	if (handler->read == NULL)
		handler->read = process_event_stub;
	if (handler->throttle == NULL)
		handler->throttle = process_event_stub;
	if (handler->unthrottle == NULL)
		handler->unthrottle = process_event_stub;
	if (handler->attr == NULL)
		handler->attr = process_event_stub;
	if (handler->event_type == NULL)
		handler->event_type = process_event_stub;
	if (handler->tracing_data == NULL)
		handler->tracing_data = process_event_stub;
	if (handler->build_id == NULL)
		handler->build_id = process_event_stub;
}

static const char *event__name[] = {
	[0]                               = "TOTAL",
	[PERF_RECORD_MMAP]                = "MMAP",
	[PERF_RECORD_LOST]                = "LOST",
	[PERF_RECORD_COMM]                = "COMM",
	[PERF_RECORD_EXIT]                = "EXIT",
	[PERF_RECORD_THROTTLE]            = "THROTTLE",
	[PERF_RECORD_UNTHROTTLE]          = "UNTHROTTLE",
	[PERF_RECORD_FORK]                = "FORK",
	[PERF_RECORD_READ]                = "READ",
	[PERF_RECORD_SAMPLE]              = "SAMPLE",
	[PERF_RECORD_HEADER_ATTR]         = "ATTR",
	[PERF_RECORD_HEADER_EVENT_TYPE]   = "EVENT_TYPE",
	[PERF_RECORD_HEADER_TRACING_DATA] = "TRACING_DATA",
	[PERF_RECORD_HEADER_BUILD_ID]     = "BUILD_ID",
};

unsigned long event__total[PERF_RECORD_HEADER_MAX];

void event__print_totals(void)
{
	int i;

	for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
		if (!event__name[i])
			continue;
		pr_info("%10s events: %10lu\n",
			event__name[i], event__total[i]);
	}
}
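
/*
 * Byte-swap a buffer as a sequence of u64 words; byte_size is expected
 * to be a multiple of sizeof(u64).
 */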
void mem_bswap_64(void *src, int byte_size)
{
	u64 *m = src;

	while (byte_size > 0) {
		*m = bswap_64(*m);
		byte_size -= sizeof(u64);
		++m;
	}
}

static void event__all64_swap(event_t *self)
{
	struct perf_event_header *hdr = &self->header;

	mem_bswap_64(hdr + 1, self->header.size - sizeof(*hdr));
}

static void event__comm_swap(event_t *self)
{
	self->comm.pid = bswap_32(self->comm.pid);
	self->comm.tid = bswap_32(self->comm.tid);
}

static void event__mmap_swap(event_t *self)
{
	self->mmap.pid   = bswap_32(self->mmap.pid);
	self->mmap.tid   = bswap_32(self->mmap.tid);
	self->mmap.start = bswap_64(self->mmap.start);
	self->mmap.len   = bswap_64(self->mmap.len);
	self->mmap.pgoff = bswap_64(self->mmap.pgoff);
}

static void event__task_swap(event_t *self)
{
	self->fork.pid  = bswap_32(self->fork.pid);
	self->fork.tid  = bswap_32(self->fork.tid);
	self->fork.ppid = bswap_32(self->fork.ppid);
	self->fork.ptid = bswap_32(self->fork.ptid);
	self->fork.time = bswap_64(self->fork.time);
}

static void event__read_swap(event_t *self)
{
	self->read.pid          = bswap_32(self->read.pid);
	self->read.tid          = bswap_32(self->read.tid);
	self->read.value        = bswap_64(self->read.value);
	self->read.time_enabled = bswap_64(self->read.time_enabled);
	self->read.time_running = bswap_64(self->read.time_running);
	self->read.id           = bswap_64(self->read.id);
}
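
/*
 * Swap the fixed-size perf_event_attr fields, then swap the trailing
 * array of u64 ids that follows the attr in the event.
 */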
static void event__attr_swap(event_t *self)
{
	size_t size;

	self->attr.attr.type          = bswap_32(self->attr.attr.type);
	self->attr.attr.size          = bswap_32(self->attr.attr.size);
	self->attr.attr.config        = bswap_64(self->attr.attr.config);
	self->attr.attr.sample_period = bswap_64(self->attr.attr.sample_period);
	self->attr.attr.sample_type   = bswap_64(self->attr.attr.sample_type);
	self->attr.attr.read_format   = bswap_64(self->attr.attr.read_format);
	self->attr.attr.wakeup_events = bswap_32(self->attr.attr.wakeup_events);
	self->attr.attr.bp_type       = bswap_32(self->attr.attr.bp_type);
	self->attr.attr.bp_addr       = bswap_64(self->attr.attr.bp_addr);
	self->attr.attr.bp_len        = bswap_64(self->attr.attr.bp_len);

	size = self->header.size;
	size -= (void *)&self->attr.id - (void *)self;
	mem_bswap_64(self->attr.id, size);
}

static void event__event_type_swap(event_t *self)
{
	self->event_type.event_type.event_id =
		bswap_64(self->event_type.event_type.event_id);
}

static void event__tracing_data_swap(event_t *self)
{
	self->tracing_data.size = bswap_32(self->tracing_data.size);
}

typedef void (*event__swap_op)(event_t *self);

static event__swap_op event__swap_ops[] = {
	[PERF_RECORD_MMAP]                = event__mmap_swap,
	[PERF_RECORD_COMM]                = event__comm_swap,
	[PERF_RECORD_FORK]                = event__task_swap,
	[PERF_RECORD_EXIT]                = event__task_swap,
	[PERF_RECORD_LOST]                = event__all64_swap,
	[PERF_RECORD_READ]                = event__read_swap,
	[PERF_RECORD_SAMPLE]              = event__all64_swap,
	[PERF_RECORD_HEADER_ATTR]         = event__attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE]   = event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID]     = NULL,
	[PERF_RECORD_HEADER_MAX]          = NULL,
};

struct sample_queue {
	u64			timestamp;
	struct sample_event	*event;
	struct list_head	list;
};

#define FLUSH_PERIOD	(2 * NSEC_PER_SEC)
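
/*
 * Deliver, in timestamp order, every queued sample at or below the
 * current flush limit, then remove it from the queue.
 */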
static void flush_sample_queue(struct perf_session *s,
			       struct perf_event_ops *ops)
{
	struct list_head *head = &s->ordered_samples.samples_head;
	u64 limit = s->ordered_samples.flush_limit;
	struct sample_queue *tmp, *iter;

	if (!ops->ordered_samples)
		return;

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (iter->timestamp > limit)
			return;

		if (iter == s->ordered_samples.last_inserted)
			s->ordered_samples.last_inserted = NULL;

		ops->sample((event_t *)iter->event, s);

		s->ordered_samples.last_flush = iter->timestamp;
		list_del(&iter->list);
		free(iter->event);
		free(iter);
	}
}

static void __queue_sample_end(struct sample_queue *new, struct list_head *head)
{
	struct sample_queue *iter;

	list_for_each_entry_reverse(iter, head, list) {
		if (iter->timestamp < new->timestamp) {
			list_add(&new->list, &iter->list);
			return;
		}
	}

	list_add(&new->list, head);
}

static void __queue_sample_before(struct sample_queue *new,
				  struct sample_queue *iter,
				  struct list_head *head)
{
	list_for_each_entry_continue_reverse(iter, head, list) {
		if (iter->timestamp < new->timestamp) {
			list_add(&new->list, &iter->list);
			return;
		}
	}

	list_add(&new->list, head);
}

static void __queue_sample_after(struct sample_queue *new,
				 struct sample_queue *iter,
				 struct list_head *head)
{
	list_for_each_entry_continue(iter, head, list) {
		if (iter->timestamp > new->timestamp) {
			list_add_tail(&new->list, &iter->list);
			return;
		}
	}

	list_add_tail(&new->list, head);
}

/* The queue is ordered by time */
static void __queue_sample_event(struct sample_queue *new,
				 struct perf_session *s)
{
	struct sample_queue *last_inserted = s->ordered_samples.last_inserted;
	struct list_head *head = &s->ordered_samples.samples_head;

	if (!last_inserted) {
		__queue_sample_end(new, head);
		return;
	}

	/*
	 * Most of the time the current event has a timestamp
	 * very close to the last event inserted, unless we just switched
	 * to another event buffer. Having a sorting based on a list and
	 * on the last inserted event that is close to the current one is
	 * probably more efficient than an rbtree based sorting.
	 */
	if (last_inserted->timestamp >= new->timestamp)
		__queue_sample_before(new, last_inserted, head);
	else
		__queue_sample_after(new, last_inserted, head);
}
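
/*
 * Copy the event into the time-ordered queue; once the buffered slice
 * grows past 2 * FLUSH_PERIOD, advance the flush limit and flush the
 * oldest period (rationale in the comment inside the function).
 */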
static int queue_sample_event(event_t *event, struct sample_data *data,
			      struct perf_session *s,
			      struct perf_event_ops *ops)
{
	u64 timestamp = data->time;
	struct sample_queue *new;
	u64 flush_limit;

	if (s->ordered_samples.flush_limit == ULLONG_MAX)
		s->ordered_samples.flush_limit = timestamp + FLUSH_PERIOD;

	if (timestamp < s->ordered_samples.last_flush) {
		printf("Warning: Timestamp below last timeslice flush\n");
		return -EINVAL;
	}

	new = malloc(sizeof(*new));
	if (!new)
		return -ENOMEM;

	new->timestamp = timestamp;

	new->event = malloc(event->header.size);
	if (!new->event) {
		free(new);
		return -ENOMEM;
	}

	memcpy(new->event, event, event->header.size);

	__queue_sample_event(new, s);
	s->ordered_samples.last_inserted = new;

	/*
	 * We want to have a slice of events covering 2 * FLUSH_PERIOD.
	 * If FLUSH_PERIOD is big enough, it ensures every event that
	 * occurred in the first half of the timeslice has been buffered
	 * and that none remain (we need that because of the weakly
	 * ordered event recording we have). Then once we reach the
	 * 2 * FLUSH_PERIOD timeslice, we flush the first half to be
	 * gentle with the memory (the second half can still get new
	 * events in the middle, so wait another period to flush it).
	 */
	flush_limit = s->ordered_samples.flush_limit;

	if (new->timestamp > flush_limit &&
	    new->timestamp - flush_limit > FLUSH_PERIOD) {
		s->ordered_samples.flush_limit += FLUSH_PERIOD;
		flush_sample_queue(s, ops);
	}

	return 0;
}

static int perf_session__process_sample(event_t *event, struct perf_session *s,
					struct perf_event_ops *ops)
{
	struct sample_data data;

	if (!ops->ordered_samples)
		return ops->sample(event, s);

	bzero(&data, sizeof(struct sample_data));
	event__parse_sample(event, s->sample_type, &data);

	queue_sample_event(event, &data, s, ops);

	return 0;
}
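
/*
 * Central dispatcher: account the event, byte-swap it if the file was
 * recorded on a host with different endianness, and hand it to the
 * matching handler in ops.
 */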
static int perf_session__process_event(struct perf_session *self,
				       event_t *event,
				       struct perf_event_ops *ops,
				       u64 offset, u64 head)
{
	trace_event(event);

	if (event->header.type < PERF_RECORD_HEADER_MAX) {
		dump_printf("%#Lx [%#x]: PERF_RECORD_%s",
			    offset + head, event->header.size,
			    event__name[event->header.type]);
		++event__total[0];
		++event__total[event->header.type];
	}

	if (self->header.needs_swap && event__swap_ops[event->header.type])
		event__swap_ops[event->header.type](event);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		return perf_session__process_sample(event, self, ops);
	case PERF_RECORD_MMAP:
		return ops->mmap(event, self);
	case PERF_RECORD_COMM:
		return ops->comm(event, self);
	case PERF_RECORD_FORK:
		return ops->fork(event, self);
	case PERF_RECORD_EXIT:
		return ops->exit(event, self);
	case PERF_RECORD_LOST:
		return ops->lost(event, self);
	case PERF_RECORD_READ:
		return ops->read(event, self);
	case PERF_RECORD_THROTTLE:
		return ops->throttle(event, self);
	case PERF_RECORD_UNTHROTTLE:
		return ops->unthrottle(event, self);
	case PERF_RECORD_HEADER_ATTR:
		return ops->attr(event, self);
	case PERF_RECORD_HEADER_EVENT_TYPE:
		return ops->event_type(event, self);
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* setup for reading amidst mmap */
		lseek(self->fd, offset + head, SEEK_SET);
		return ops->tracing_data(event, self);
	case PERF_RECORD_HEADER_BUILD_ID:
		return ops->build_id(event, self);
	default:
		self->unknown_events++;
		return -1;
	}
}

void perf_event_header__bswap(struct perf_event_header *self)
{
	self->type = bswap_32(self->type);
	self->misc = bswap_16(self->misc);
	self->size = bswap_16(self->size);
}

static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
	struct thread *thread = perf_session__findnew(self, 0);

	if (thread == NULL || thread__set_comm(thread, "swapper")) {
		pr_err("problem inserting idle task.\n");
		thread = NULL;
	}

	return thread;
}
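
/*
 * read() until the requested size has been consumed, returning the
 * number of bytes read, 0 on EOF, or a negative value on error.
 */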
int do_read(int fd, void *buf, size_t size)
{
	void *buf_start = buf;

	while (size) {
		int ret = read(fd, buf, size);

		if (ret <= 0)
			return ret;

		size -= ret;
		buf += ret;
	}

	return buf - buf_start;
}

#define session_done()	(*(volatile int *)(&session_done))
volatile int session_done;

static int __perf_session__process_pipe_events(struct perf_session *self,
					       struct perf_event_ops *ops)
{
	event_t event;
	uint32_t size;
	int skip = 0;
	u64 head;
	int err;
	void *p;

	perf_event_ops__fill_defaults(ops);

	head = 0;
more:
	err = do_read(self->fd, &event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (self->header.needs_swap)
		perf_event_header__bswap(&event.header);

	size = event.header.size;
	if (size == 0)
		size = 8;

	p = &event;
	p += sizeof(struct perf_event_header);

	err = do_read(self->fd, p, size - sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0) {
			pr_err("unexpected end of event stream\n");
			goto done;
		}

		pr_err("failed to read event data\n");
		goto out_err;
	}

	if (size == 0 ||
	    (skip = perf_session__process_event(self, &event, ops,
						0, head)) < 0) {
		dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
			    head, event.header.size, event.header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	dump_printf("\n%#Lx [%#x]: event: %d\n",
		    head, event.header.size, event.header.type);

	if (skip > 0)
		head += skip;

	if (!session_done())
		goto more;
done:
	err = 0;
out_err:
	return err;
}
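
/*
 * Process events from an on-disk file by mmap()ing self->mmap_window
 * pages at a time and walking the events in place, remapping whenever
 * the next event would cross the end of the current window.
 */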
int __perf_session__process_events(struct perf_session *self,
				   u64 data_offset, u64 data_size,
				   u64 file_size, struct perf_event_ops *ops)
{
	int err, mmap_prot, mmap_flags;
	u64 head, shift;
	u64 offset = 0;
	size_t page_size;
	event_t *event;
	uint32_t size;
	char *buf;
	struct ui_progress *progress = ui_progress__new("Processing events...",
							self->size);
	if (progress == NULL)
		return -1;

	perf_event_ops__fill_defaults(ops);

	page_size = sysconf(_SC_PAGESIZE);

	head = data_offset;
	shift = page_size * (head / page_size);
	offset += shift;
	head -= shift;

	mmap_prot  = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (self->header.needs_swap) {
		mmap_prot  |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, page_size * self->mmap_window, mmap_prot,
		   mmap_flags, self->fd, offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}
more:
	event = (event_t *)(buf + head);
	ui_progress__update(progress, offset);

	if (self->header.needs_swap)
		perf_event_header__bswap(&event->header);
	size = event->header.size;
	if (size == 0)
		size = 8;

	if (head + event->header.size >= page_size * self->mmap_window) {
		int munmap_ret;

		shift = page_size * (head / page_size);

		munmap_ret = munmap(buf, page_size * self->mmap_window);
		assert(munmap_ret == 0);

		offset += shift;
		head -= shift;
		goto remap;
	}

	size = event->header.size;

	dump_printf("\n%#Lx [%#x]: event: %d\n",
		    offset + head, event->header.size, event->header.type);

	if (size == 0 ||
	    perf_session__process_event(self, event, ops, offset, head) < 0) {
		dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
			    offset + head, event->header.size,
			    event->header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	if (offset + head >= data_offset + data_size)
		goto done;

	if (offset + head < file_size)
		goto more;
done:
	err = 0;
	/* do the final flush for ordered samples */
	self->ordered_samples.flush_limit = ULLONG_MAX;
	flush_sample_queue(self, ops);
out_err:
	ui_progress__delete(progress);
	return err;
}

int perf_session__process_events(struct perf_session *self,
				 struct perf_event_ops *ops)
{
	int err;

	if (perf_session__register_idle_thread(self) == NULL)
		return -ENOMEM;

	if (!symbol_conf.full_paths) {
		char bf[PATH_MAX];

		if (getcwd(bf, sizeof(bf)) == NULL) {
			err = -errno;
out_getcwd_err:
			pr_err("failed to get the current directory\n");
			goto out_err;
		}
		self->cwd = strdup(bf);
		if (self->cwd == NULL) {
			err = -ENOMEM;
			goto out_getcwd_err;
		}
		self->cwdlen = strlen(self->cwd);
	}

	if (!self->fd_pipe)
		err = __perf_session__process_events(self,
						     self->header.data_offset,
						     self->header.data_size,
						     self->size, ops);
	else
		err = __perf_session__process_pipe_events(self, ops);
out_err:
	return err;
}

bool perf_session__has_traces(struct perf_session *self, const char *msg)
{
	if (!(self->sample_type & PERF_SAMPLE_RAW)) {
		pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
		return false;
	}

	return true;
}
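
/*
 * Record a reference symbol (e.g. "_text") and its address in every
 * kernel map so that relocation between the running kernel and
 * kallsyms can be detected later. A trailing "]" from module notation
 * is stripped from the symbol name.
 */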
int perf_session__set_kallsyms_ref_reloc_sym(struct map **maps,
					     const char *symbol_name,
					     u64 addr)
{
	char *bracket;
	enum map_type i;
	struct ref_reloc_sym *ref;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct kmap *kmap = map__kmap(maps[i]);
		kmap->ref_reloc_sym = ref;
	}

	return 0;
}