/* session.c */
  1. #define _FILE_OFFSET_BITS 64
  2. #include <linux/kernel.h>
  3. #include <byteswap.h>
  4. #include <unistd.h>
  5. #include <sys/types.h>
  6. #include "session.h"
  7. #include "sort.h"
  8. #include "util.h"
  9. static int perf_session__open(struct perf_session *self, bool force)
  10. {
  11. struct stat input_stat;
  12. self->fd = open(self->filename, O_RDONLY);
  13. if (self->fd < 0) {
  14. pr_err("failed to open file: %s", self->filename);
  15. if (!strcmp(self->filename, "perf.data"))
  16. pr_err(" (try 'perf record' first)");
  17. pr_err("\n");
  18. return -errno;
  19. }
  20. if (fstat(self->fd, &input_stat) < 0)
  21. goto out_close;
  22. if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
  23. pr_err("file %s not owned by current user or root\n",
  24. self->filename);
  25. goto out_close;
  26. }
  27. if (!input_stat.st_size) {
  28. pr_info("zero-sized file (%s), nothing to do!\n",
  29. self->filename);
  30. goto out_close;
  31. }
  32. if (perf_header__read(&self->header, self->fd) < 0) {
  33. pr_err("incompatible file format");
  34. goto out_close;
  35. }
  36. self->size = input_stat.st_size;
  37. return 0;
  38. out_close:
  39. close(self->fd);
  40. self->fd = -1;
  41. return -1;
  42. }
  43. static inline int perf_session__create_kernel_maps(struct perf_session *self)
  44. {
  45. return map_groups__create_kernel_maps(&self->kmaps, self->vmlinux_maps);
  46. }
  47. struct perf_session *perf_session__new(const char *filename, int mode, bool force)
  48. {
  49. size_t len = filename ? strlen(filename) + 1 : 0;
  50. struct perf_session *self = zalloc(sizeof(*self) + len);
  51. if (self == NULL)
  52. goto out;
  53. if (perf_header__init(&self->header) < 0)
  54. goto out_free;
  55. memcpy(self->filename, filename, len);
  56. self->threads = RB_ROOT;
  57. self->stats_by_id = RB_ROOT;
  58. self->last_match = NULL;
  59. self->mmap_window = 32;
  60. self->cwd = NULL;
  61. self->cwdlen = 0;
  62. self->unknown_events = 0;
  63. map_groups__init(&self->kmaps);
  64. if (mode == O_RDONLY) {
  65. if (perf_session__open(self, force) < 0)
  66. goto out_delete;
  67. } else if (mode == O_WRONLY) {
  68. /*
  69. * In O_RDONLY mode this will be performed when reading the
  70. * kernel MMAP event, in event__process_mmap().
  71. */
  72. if (perf_session__create_kernel_maps(self) < 0)
  73. goto out_delete;
  74. }
  75. self->sample_type = perf_header__sample_type(&self->header);
  76. out:
  77. return self;
  78. out_free:
  79. free(self);
  80. return NULL;
  81. out_delete:
  82. perf_session__delete(self);
  83. return NULL;
  84. }
  85. void perf_session__delete(struct perf_session *self)
  86. {
  87. perf_header__exit(&self->header);
  88. close(self->fd);
  89. free(self->cwd);
  90. free(self);
  91. }
  92. static bool symbol__match_parent_regex(struct symbol *sym)
  93. {
  94. if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
  95. return 1;
  96. return 0;
  97. }
/*
 * Resolve each address in @chain against @thread's maps.
 *
 * Entries >= PERF_CONTEXT_MAX are context markers that only switch the
 * cpumode (hypervisor/kernel/user) used for the subsequent entries; they
 * resolve to no symbol.
 *
 * When symbol_conf.use_callchain is set, returns a calloc()'ed array of
 * chain->nr symbol pointers (NULL in unresolved slots) that the caller
 * owns and must free; otherwise NULL is returned and the walk stops at
 * the first resolved symbol, serving only the parent lookup.  On
 * allocation failure the process exits.
 *
 * @parent: out parameter; when sort__has_parent is set, receives the
 * first symbol whose name matches parent_regex.
 */
struct symbol **perf_session__resolve_callchain(struct perf_session *self,
						struct thread *thread,
						struct ip_callchain *chain,
						struct symbol **parent)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	struct symbol **syms = NULL;
	unsigned int i;

	if (symbol_conf.use_callchain) {
		syms = calloc(chain->nr, sizeof(*syms));
		if (!syms) {
			fprintf(stderr, "Can't allocate memory for symbols\n");
			exit(-1);
		}
	}

	for (i = 0; i < chain->nr; i++) {
		u64 ip = chain->ips[i];
		struct addr_location al;

		if (ip >= PERF_CONTEXT_MAX) {
			/* context marker, not a real address */
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR; break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL; break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER; break;
			default:
				break;
			}
			continue;
		}

		thread__find_addr_location(thread, self, cpumode,
					   MAP__FUNCTION, ip, &al, NULL);
		if (al.sym != NULL) {
			if (sort__has_parent && !*parent &&
			    symbol__match_parent_regex(al.sym))
				*parent = al.sym;
			/* without callchains only the parent lookup matters */
			if (!symbol_conf.use_callchain)
				break;
			syms[i] = al.sym;
		}
	}

	return syms;
}
/*
 * Default no-op handler installed for any perf_event_ops callback the
 * caller left NULL, so the dispatcher never has to test for NULL
 * function pointers.
 */
static int process_event_stub(event_t *event __used,
			      struct perf_session *session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}
  148. static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
  149. {
  150. if (handler->sample == NULL)
  151. handler->sample = process_event_stub;
  152. if (handler->mmap == NULL)
  153. handler->mmap = process_event_stub;
  154. if (handler->comm == NULL)
  155. handler->comm = process_event_stub;
  156. if (handler->fork == NULL)
  157. handler->fork = process_event_stub;
  158. if (handler->exit == NULL)
  159. handler->exit = process_event_stub;
  160. if (handler->lost == NULL)
  161. handler->lost = process_event_stub;
  162. if (handler->read == NULL)
  163. handler->read = process_event_stub;
  164. if (handler->throttle == NULL)
  165. handler->throttle = process_event_stub;
  166. if (handler->unthrottle == NULL)
  167. handler->unthrottle = process_event_stub;
  168. }
/*
 * Human-readable names for the PERF_RECORD_* types, indexed by type.
 * Slot 0 doubles as the "TOTAL" row printed by event__print_totals().
 */
static const char *event__name[] = {
	[0]			= "TOTAL",
	[PERF_RECORD_MMAP]	= "MMAP",
	[PERF_RECORD_LOST]	= "LOST",
	[PERF_RECORD_COMM]	= "COMM",
	[PERF_RECORD_EXIT]	= "EXIT",
	[PERF_RECORD_THROTTLE]	= "THROTTLE",
	[PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE",
	[PERF_RECORD_FORK]	= "FORK",
	[PERF_RECORD_READ]	= "READ",
	[PERF_RECORD_SAMPLE]	= "SAMPLE",
};

/* Per-type event counters; slot 0 accumulates the grand total. */
unsigned long event__total[PERF_RECORD_MAX];
  182. void event__print_totals(void)
  183. {
  184. int i;
  185. for (i = 0; i < PERF_RECORD_MAX; ++i)
  186. pr_info("%10s events: %10ld\n",
  187. event__name[i], event__total[i]);
  188. }
  189. void mem_bswap_64(void *src, int byte_size)
  190. {
  191. u64 *m = src;
  192. while (byte_size > 0) {
  193. *m = bswap_64(*m);
  194. byte_size -= sizeof(u64);
  195. ++m;
  196. }
  197. }
  198. static void event__all64_swap(event_t *self)
  199. {
  200. struct perf_event_header *hdr = &self->header;
  201. mem_bswap_64(hdr + 1, self->header.size - sizeof(*hdr));
  202. }
  203. static void event__comm_swap(event_t *self)
  204. {
  205. self->comm.pid = bswap_32(self->comm.pid);
  206. self->comm.tid = bswap_32(self->comm.tid);
  207. }
  208. static void event__mmap_swap(event_t *self)
  209. {
  210. self->mmap.pid = bswap_32(self->mmap.pid);
  211. self->mmap.tid = bswap_32(self->mmap.tid);
  212. self->mmap.start = bswap_64(self->mmap.start);
  213. self->mmap.len = bswap_64(self->mmap.len);
  214. self->mmap.pgoff = bswap_64(self->mmap.pgoff);
  215. }
  216. static void event__task_swap(event_t *self)
  217. {
  218. self->fork.pid = bswap_32(self->fork.pid);
  219. self->fork.tid = bswap_32(self->fork.tid);
  220. self->fork.ppid = bswap_32(self->fork.ppid);
  221. self->fork.ptid = bswap_32(self->fork.ptid);
  222. self->fork.time = bswap_64(self->fork.time);
  223. }
  224. static void event__read_swap(event_t *self)
  225. {
  226. self->read.pid = bswap_32(self->read.pid);
  227. self->read.tid = bswap_32(self->read.tid);
  228. self->read.value = bswap_64(self->read.value);
  229. self->read.time_enabled = bswap_64(self->read.time_enabled);
  230. self->read.time_running = bswap_64(self->read.time_running);
  231. self->read.id = bswap_64(self->read.id);
  232. }
typedef void (*event__swap_op)(event_t *self);

/*
 * Per-type byte-swap handlers for perf.data files recorded with the
 * opposite endianness.  A NULL entry means nothing beyond the header
 * (swapped separately) needs fixing for that type.
 */
static event__swap_op event__swap_ops[] = {
	[PERF_RECORD_MMAP]   = event__mmap_swap,
	[PERF_RECORD_COMM]   = event__comm_swap,
	[PERF_RECORD_FORK]   = event__task_swap,
	[PERF_RECORD_EXIT]   = event__task_swap,
	[PERF_RECORD_LOST]   = event__all64_swap,
	[PERF_RECORD_READ]   = event__read_swap,
	[PERF_RECORD_SAMPLE] = event__all64_swap,
	[PERF_RECORD_MAX]    = NULL,
};
  244. static int perf_session__process_event(struct perf_session *self,
  245. event_t *event,
  246. struct perf_event_ops *ops,
  247. u64 offset, u64 head)
  248. {
  249. trace_event(event);
  250. if (event->header.type < PERF_RECORD_MAX) {
  251. dump_printf("%#Lx [%#x]: PERF_RECORD_%s",
  252. offset + head, event->header.size,
  253. event__name[event->header.type]);
  254. ++event__total[0];
  255. ++event__total[event->header.type];
  256. }
  257. if (self->header.needs_swap && event__swap_ops[event->header.type])
  258. event__swap_ops[event->header.type](event);
  259. switch (event->header.type) {
  260. case PERF_RECORD_SAMPLE:
  261. return ops->sample(event, self);
  262. case PERF_RECORD_MMAP:
  263. return ops->mmap(event, self);
  264. case PERF_RECORD_COMM:
  265. return ops->comm(event, self);
  266. case PERF_RECORD_FORK:
  267. return ops->fork(event, self);
  268. case PERF_RECORD_EXIT:
  269. return ops->exit(event, self);
  270. case PERF_RECORD_LOST:
  271. return ops->lost(event, self);
  272. case PERF_RECORD_READ:
  273. return ops->read(event, self);
  274. case PERF_RECORD_THROTTLE:
  275. return ops->throttle(event, self);
  276. case PERF_RECORD_UNTHROTTLE:
  277. return ops->unthrottle(event, self);
  278. default:
  279. self->unknown_events++;
  280. return -1;
  281. }
  282. }
  283. void perf_event_header__bswap(struct perf_event_header *self)
  284. {
  285. self->type = bswap_32(self->type);
  286. self->misc = bswap_16(self->misc);
  287. self->size = bswap_16(self->size);
  288. }
  289. int perf_header__read_build_ids(struct perf_header *self,
  290. int input, u64 offset, u64 size)
  291. {
  292. struct build_id_event bev;
  293. char filename[PATH_MAX];
  294. u64 limit = offset + size;
  295. int err = -1;
  296. while (offset < limit) {
  297. struct dso *dso;
  298. ssize_t len;
  299. struct list_head *head = &dsos__user;
  300. if (read(input, &bev, sizeof(bev)) != sizeof(bev))
  301. goto out;
  302. if (self->needs_swap)
  303. perf_event_header__bswap(&bev.header);
  304. len = bev.header.size - sizeof(bev);
  305. if (read(input, filename, len) != len)
  306. goto out;
  307. if (bev.header.misc & PERF_RECORD_MISC_KERNEL)
  308. head = &dsos__kernel;
  309. dso = __dsos__findnew(head, filename);
  310. if (dso != NULL) {
  311. dso__set_build_id(dso, &bev.build_id);
  312. if (head == &dsos__kernel && filename[0] == '[')
  313. dso->kernel = 1;
  314. }
  315. offset += bev.header.size;
  316. }
  317. err = 0;
  318. out:
  319. return err;
  320. }
  321. static struct thread *perf_session__register_idle_thread(struct perf_session *self)
  322. {
  323. struct thread *thread = perf_session__findnew(self, 0);
  324. if (thread == NULL || thread__set_comm(thread, "swapper")) {
  325. pr_err("problem inserting idle task.\n");
  326. thread = NULL;
  327. }
  328. return thread;
  329. }
/*
 * Core event loop: mmap()s the perf.data file one window at a time and
 * feeds every record in [data_offset, data_offset + data_size) to
 * perf_session__process_event().
 *
 * The window spans self->mmap_window pages; @offset is the page-aligned
 * file offset the window starts at and @head the byte offset within it.
 * When an event would cross the end of the window, the window is slid
 * forward and re-mapped (remap:).
 *
 * Byte-swapped files are mapped MAP_PRIVATE|PROT_WRITE because the
 * swapping is done in place and must not touch the file.
 *
 * Returns 0 on success, or a negative errno if mmap() fails.
 */
int __perf_session__process_events(struct perf_session *self,
				   u64 data_offset, u64 data_size,
				   u64 file_size, struct perf_event_ops *ops)
{
	int err, mmap_prot, mmap_flags;
	u64 head, shift;
	u64 offset = 0;
	size_t page_size;
	event_t *event;
	uint32_t size;
	char *buf;

	perf_event_ops__fill_defaults(ops);

	page_size = sysconf(_SC_PAGESIZE);

	head = data_offset;
	/* align the initial mapping down to a page boundary */
	shift = page_size * (head / page_size);
	offset += shift;
	head -= shift;

	mmap_prot  = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (self->header.needs_swap) {
		/* we swap events in place: keep the writes private to us */
		mmap_prot  |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, page_size * self->mmap_window, mmap_prot,
		   mmap_flags, self->fd, offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}
more:
	event = (event_t *)(buf + head);

	if (self->header.needs_swap)
		perf_event_header__bswap(&event->header);
	size = event->header.size;
	if (size == 0)
		size = 8;

	/* event straddles the window's end: slide the window and retry */
	if (head + event->header.size >= page_size * self->mmap_window) {
		int munmap_ret;

		shift = page_size * (head / page_size);

		munmap_ret = munmap(buf, page_size * self->mmap_window);
		assert(munmap_ret == 0);

		offset += shift;
		head -= shift;
		goto remap;
	}

	size = event->header.size;

	dump_printf("\n%#Lx [%#x]: event: %d\n",
		    offset + head, event->header.size, event->header.type);

	if (size == 0 ||
	    perf_session__process_event(self, event, ops, offset, head) < 0) {
		dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
			    offset + head, event->header.size,
			    event->header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	if (offset + head >= data_offset + data_size)
		goto done;

	if (offset + head < file_size)
		goto more;
done:
	err = 0;
out_err:
	return err;
}
  403. int perf_session__process_events(struct perf_session *self,
  404. struct perf_event_ops *ops)
  405. {
  406. int err;
  407. if (perf_session__register_idle_thread(self) == NULL)
  408. return -ENOMEM;
  409. if (!symbol_conf.full_paths) {
  410. char bf[PATH_MAX];
  411. if (getcwd(bf, sizeof(bf)) == NULL) {
  412. err = -errno;
  413. out_getcwd_err:
  414. pr_err("failed to get the current directory\n");
  415. goto out_err;
  416. }
  417. self->cwd = strdup(bf);
  418. if (self->cwd == NULL) {
  419. err = -ENOMEM;
  420. goto out_getcwd_err;
  421. }
  422. self->cwdlen = strlen(self->cwd);
  423. }
  424. err = __perf_session__process_events(self, self->header.data_offset,
  425. self->header.data_size,
  426. self->size, ops);
  427. out_err:
  428. return err;
  429. }
  430. bool perf_session__has_traces(struct perf_session *self, const char *msg)
  431. {
  432. if (!(self->sample_type & PERF_SAMPLE_RAW)) {
  433. pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
  434. return false;
  435. }
  436. return true;
  437. }
  438. int perf_session__set_kallsyms_ref_reloc_sym(struct perf_session *self,
  439. const char *symbol_name,
  440. u64 addr)
  441. {
  442. char *bracket;
  443. enum map_type i;
  444. self->ref_reloc_sym.name = strdup(symbol_name);
  445. if (self->ref_reloc_sym.name == NULL)
  446. return -ENOMEM;
  447. bracket = strchr(self->ref_reloc_sym.name, ']');
  448. if (bracket)
  449. *bracket = '\0';
  450. self->ref_reloc_sym.addr = addr;
  451. for (i = 0; i < MAP__NR_TYPES; ++i) {
  452. struct kmap *kmap = map__kmap(self->vmlinux_maps[i]);
  453. kmap->ref_reloc_sym = &self->ref_reloc_sym;
  454. }
  455. return 0;
  456. }
  457. static u64 map__reloc_map_ip(struct map *map, u64 ip)
  458. {
  459. return ip + (s64)map->pgoff;
  460. }
  461. static u64 map__reloc_unmap_ip(struct map *map, u64 ip)
  462. {
  463. return ip - (s64)map->pgoff;
  464. }
  465. void map__reloc_vmlinux(struct map *self)
  466. {
  467. struct kmap *kmap = map__kmap(self);
  468. s64 reloc;
  469. if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->unrelocated_addr)
  470. return;
  471. reloc = (kmap->ref_reloc_sym->unrelocated_addr -
  472. kmap->ref_reloc_sym->addr);
  473. if (!reloc)
  474. return;
  475. self->map_ip = map__reloc_map_ip;
  476. self->unmap_ip = map__reloc_unmap_ip;
  477. self->pgoff = reloc;
  478. }