session.c

#define _FILE_OFFSET_BITS 64

#include <linux/kernel.h>
#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>

#include "session.h"
#include "sort.h"
#include "util.h"

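/*
 * Open self->filename read-only, check that it is owned by root or by the
 * current effective user (unless 'force' is set), refuse empty files, and
 * read the perf file header.  Returns 0 on success; on error the fd is
 * closed and reset to -1.
 */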
static int perf_session__open(struct perf_session *self, bool force)
{
	struct stat input_stat;

	self->fd = open(self->filename, O_RDONLY);
	if (self->fd < 0) {
		pr_err("failed to open file: %s", self->filename);
		if (!strcmp(self->filename, "perf.data"))
			pr_err(" (try 'perf record' first)");
		pr_err("\n");
		return -errno;
	}

	if (fstat(self->fd, &input_stat) < 0)
		goto out_close;

	if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
		pr_err("file %s not owned by current user or root\n",
		       self->filename);
		goto out_close;
	}

	if (!input_stat.st_size) {
		pr_info("zero-sized file (%s), nothing to do!\n",
			self->filename);
		goto out_close;
	}

	if (perf_header__read(&self->header, self->fd) < 0) {
		pr_err("incompatible file format");
		goto out_close;
	}

	self->size = input_stat.st_size;
	return 0;

out_close:
	close(self->fd);
	self->fd = -1;
	return -1;
}

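/*
 * Allocate and initialize a session.  For O_RDONLY the existing data file is
 * opened and its header read; for O_WRONLY the kernel maps are created up
 * front (when reading, that happens later from the kernel MMAP event).
 * Returns NULL on any failure.
 */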
struct perf_session *perf_session__new(const char *filename, int mode, bool force)
{
	size_t len = filename ? strlen(filename) + 1 : 0;
	struct perf_session *self = zalloc(sizeof(*self) + len);

	if (self == NULL)
		goto out;

	if (perf_header__init(&self->header) < 0)
		goto out_free;

	memcpy(self->filename, filename, len);
	self->threads = RB_ROOT;
	self->stats_by_id = RB_ROOT;
	self->last_match = NULL;
	self->mmap_window = 32;
	self->cwd = NULL;
	self->cwdlen = 0;
	self->unknown_events = 0;
	map_groups__init(&self->kmaps);

	if (mode == O_RDONLY) {
		if (perf_session__open(self, force) < 0)
			goto out_delete;
	} else if (mode == O_WRONLY) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(self) < 0)
			goto out_delete;
	}

	self->sample_type = perf_header__sample_type(&self->header);
out:
	return self;
out_free:
	free(self);
	return NULL;
out_delete:
	perf_session__delete(self);
	return NULL;
}

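/* Tear down a session: release the header, close the file and free owned memory. */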
void perf_session__delete(struct perf_session *self)
{
	perf_header__exit(&self->header);
	close(self->fd);
	free(self->cwd);
	free(self);
}

static bool symbol__match_parent_regex(struct symbol *sym)
{
	if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
		return true;

	return false;
}

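/*
 * Walk a sampled callchain and resolve each instruction pointer to a
 * map/symbol pair.  PERF_CONTEXT_* markers embedded in the chain switch the
 * cpumode used for the entries that follow.  If parent filtering is active
 * (sort__has_parent), the first symbol matching parent_regex is reported via
 * *parent.  The caller owns the returned array.
 */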
struct map_symbol *perf_session__resolve_callchain(struct perf_session *self,
						   struct thread *thread,
						   struct ip_callchain *chain,
						   struct symbol **parent)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	unsigned int i;
	struct map_symbol *syms = calloc(chain->nr, sizeof(*syms));

	if (!syms)
		return NULL;

	for (i = 0; i < chain->nr; i++) {
		u64 ip = chain->ips[i];
		struct addr_location al;

		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR;
				break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL;
				break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER;
				break;
			default:
				break;
			}
			continue;
		}

		thread__find_addr_location(thread, self, cpumode,
					   MAP__FUNCTION, ip, &al, NULL);
		if (al.sym != NULL) {
			if (sort__has_parent && !*parent &&
			    symbol__match_parent_regex(al.sym))
				*parent = al.sym;
			if (!symbol_conf.use_callchain)
				break;
			syms[i].map = al.map;
			syms[i].sym = al.sym;
		}
	}

	return syms;
}

static int process_event_stub(event_t *event __used,
			      struct perf_session *session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

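/*
 * Any handler the caller left NULL falls back to process_event_stub, so
 * perf_session__process_event() can call every op unconditionally.
 */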
static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
{
	if (handler->sample == NULL)
		handler->sample = process_event_stub;
	if (handler->mmap == NULL)
		handler->mmap = process_event_stub;
	if (handler->comm == NULL)
		handler->comm = process_event_stub;
	if (handler->fork == NULL)
		handler->fork = process_event_stub;
	if (handler->exit == NULL)
		handler->exit = process_event_stub;
	if (handler->lost == NULL)
		handler->lost = process_event_stub;
	if (handler->read == NULL)
		handler->read = process_event_stub;
	if (handler->throttle == NULL)
		handler->throttle = process_event_stub;
	if (handler->unthrottle == NULL)
		handler->unthrottle = process_event_stub;
}

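/* Human-readable names and per-type counters for the PERF_RECORD_* events. */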
static const char *event__name[] = {
	[0]                      = "TOTAL",
	[PERF_RECORD_MMAP]       = "MMAP",
	[PERF_RECORD_LOST]       = "LOST",
	[PERF_RECORD_COMM]       = "COMM",
	[PERF_RECORD_EXIT]       = "EXIT",
	[PERF_RECORD_THROTTLE]   = "THROTTLE",
	[PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE",
	[PERF_RECORD_FORK]       = "FORK",
	[PERF_RECORD_READ]       = "READ",
	[PERF_RECORD_SAMPLE]     = "SAMPLE",
};

unsigned long event__total[PERF_RECORD_MAX];

void event__print_totals(void)
{
	int i;
	for (i = 0; i < PERF_RECORD_MAX; ++i)
		pr_info("%10s events: %10ld\n",
			event__name[i], event__total[i]);
}

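/*
 * Byte-swap helpers used when a perf.data file was recorded on a host with
 * the opposite endianness (self->header.needs_swap).
 */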
void mem_bswap_64(void *src, int byte_size)
{
	u64 *m = src;

	while (byte_size > 0) {
		*m = bswap_64(*m);
		byte_size -= sizeof(u64);
		++m;
	}
}

static void event__all64_swap(event_t *self)
{
	struct perf_event_header *hdr = &self->header;
	mem_bswap_64(hdr + 1, self->header.size - sizeof(*hdr));
}

static void event__comm_swap(event_t *self)
{
	self->comm.pid = bswap_32(self->comm.pid);
	self->comm.tid = bswap_32(self->comm.tid);
}

static void event__mmap_swap(event_t *self)
{
	self->mmap.pid   = bswap_32(self->mmap.pid);
	self->mmap.tid   = bswap_32(self->mmap.tid);
	self->mmap.start = bswap_64(self->mmap.start);
	self->mmap.len   = bswap_64(self->mmap.len);
	self->mmap.pgoff = bswap_64(self->mmap.pgoff);
}

static void event__task_swap(event_t *self)
{
	self->fork.pid  = bswap_32(self->fork.pid);
	self->fork.tid  = bswap_32(self->fork.tid);
	self->fork.ppid = bswap_32(self->fork.ppid);
	self->fork.ptid = bswap_32(self->fork.ptid);
	self->fork.time = bswap_64(self->fork.time);
}

static void event__read_swap(event_t *self)
{
	self->read.pid          = bswap_32(self->read.pid);
	self->read.tid          = bswap_32(self->read.tid);
	self->read.value        = bswap_64(self->read.value);
	self->read.time_enabled = bswap_64(self->read.time_enabled);
	self->read.time_running = bswap_64(self->read.time_running);
	self->read.id           = bswap_64(self->read.id);
}

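/* Per-record-type swap routine, indexed by PERF_RECORD_* type. */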
typedef void (*event__swap_op)(event_t *self);

static event__swap_op event__swap_ops[] = {
	[PERF_RECORD_MMAP]   = event__mmap_swap,
	[PERF_RECORD_COMM]   = event__comm_swap,
	[PERF_RECORD_FORK]   = event__task_swap,
	[PERF_RECORD_EXIT]   = event__task_swap,
	[PERF_RECORD_LOST]   = event__all64_swap,
	[PERF_RECORD_READ]   = event__read_swap,
	[PERF_RECORD_SAMPLE] = event__all64_swap,
	[PERF_RECORD_MAX]    = NULL,
};

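/*
 * Account and optionally byte-swap one event, then dispatch it to the
 * matching handler in 'ops'.  Unknown types bump self->unknown_events and
 * return -1.
 */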
static int perf_session__process_event(struct perf_session *self,
				       event_t *event,
				       struct perf_event_ops *ops,
				       u64 offset, u64 head)
{
	trace_event(event);

	if (event->header.type < PERF_RECORD_MAX) {
		dump_printf("%#Lx [%#x]: PERF_RECORD_%s",
			    offset + head, event->header.size,
			    event__name[event->header.type]);
		++event__total[0];
		++event__total[event->header.type];
	}

	if (self->header.needs_swap && event__swap_ops[event->header.type])
		event__swap_ops[event->header.type](event);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		return ops->sample(event, self);
	case PERF_RECORD_MMAP:
		return ops->mmap(event, self);
	case PERF_RECORD_COMM:
		return ops->comm(event, self);
	case PERF_RECORD_FORK:
		return ops->fork(event, self);
	case PERF_RECORD_EXIT:
		return ops->exit(event, self);
	case PERF_RECORD_LOST:
		return ops->lost(event, self);
	case PERF_RECORD_READ:
		return ops->read(event, self);
	case PERF_RECORD_THROTTLE:
		return ops->throttle(event, self);
	case PERF_RECORD_UNTHROTTLE:
		return ops->unthrottle(event, self);
	default:
		self->unknown_events++;
		return -1;
	}
}

void perf_event_header__bswap(struct perf_event_header *self)
{
	self->type = bswap_32(self->type);
	self->misc = bswap_16(self->misc);
	self->size = bswap_16(self->size);
}

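/*
 * Read the build-id section of the header: a sequence of build_id_event
 * records, each followed by the DSO file name.  Each build id is attached to
 * the corresponding DSO in the kernel or user DSO list.
 */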
int perf_header__read_build_ids(struct perf_header *self,
				int input, u64 offset, u64 size)
{
	struct build_id_event bev;
	char filename[PATH_MAX];
	u64 limit = offset + size;
	int err = -1;

	while (offset < limit) {
		struct dso *dso;
		ssize_t len;
		struct list_head *head = &dsos__user;

		if (read(input, &bev, sizeof(bev)) != sizeof(bev))
			goto out;

		if (self->needs_swap)
			perf_event_header__bswap(&bev.header);

		len = bev.header.size - sizeof(bev);
		if (read(input, filename, len) != len)
			goto out;

		if (bev.header.misc & PERF_RECORD_MISC_KERNEL)
			head = &dsos__kernel;

		dso = __dsos__findnew(head, filename);
		if (dso != NULL) {
			dso__set_build_id(dso, &bev.build_id);
			if (head == &dsos__kernel && filename[0] == '[')
				dso->kernel = 1;
		}

		offset += bev.header.size;
	}
	err = 0;
out:
	return err;
}

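/* Make sure pid 0 exists in the session's thread tree and is named "swapper". */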
static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
	struct thread *thread = perf_session__findnew(self, 0);

	if (thread == NULL || thread__set_comm(thread, "swapper")) {
		pr_err("problem inserting idle task.\n");
		thread = NULL;
	}

	return thread;
}

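/*
 * Core event loop: mmap the data file a window at a time (self->mmap_window
 * pages), walk the records in the mapped buffer and hand each one to
 * perf_session__process_event().  When a record would cross the end of the
 * window, the buffer is re-mapped at a page-aligned offset.  If the file
 * needs byte-swapping it is mapped MAP_PRIVATE and writable so headers can
 * be swapped in place.
 */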
int __perf_session__process_events(struct perf_session *self,
				   u64 data_offset, u64 data_size,
				   u64 file_size, struct perf_event_ops *ops)
{
	int err, mmap_prot, mmap_flags;
	u64 head, shift;
	u64 offset = 0;
	size_t page_size;
	event_t *event;
	uint32_t size;
	char *buf;
	struct ui_progress *progress = ui_progress__new("Processing events...",
							self->size);
	if (progress == NULL)
		return -1;

	perf_event_ops__fill_defaults(ops);

	page_size = sysconf(_SC_PAGESIZE);

	head = data_offset;
	shift = page_size * (head / page_size);
	offset += shift;
	head -= shift;

	mmap_prot  = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (self->header.needs_swap) {
		mmap_prot  |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, page_size * self->mmap_window, mmap_prot,
		   mmap_flags, self->fd, offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}
more:
	event = (event_t *)(buf + head);
	ui_progress__update(progress, offset);

	if (self->header.needs_swap)
		perf_event_header__bswap(&event->header);
	size = event->header.size;
	if (size == 0)
		size = 8;

	if (head + event->header.size >= page_size * self->mmap_window) {
		int munmap_ret;

		shift = page_size * (head / page_size);

		munmap_ret = munmap(buf, page_size * self->mmap_window);
		assert(munmap_ret == 0);

		offset += shift;
		head -= shift;
		goto remap;
	}

	size = event->header.size;

	dump_printf("\n%#Lx [%#x]: event: %d\n",
		    offset + head, event->header.size, event->header.type);

	if (size == 0 ||
	    perf_session__process_event(self, event, ops, offset, head) < 0) {
		dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
			    offset + head, event->header.size,
			    event->header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	if (offset + head >= data_offset + data_size)
		goto done;

	if (offset + head < file_size)
		goto more;
done:
	err = 0;
out_err:
	ui_progress__delete(progress);
	return err;
}

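/*
 * Convenience wrapper: register the idle thread, remember the current
 * working directory (used to shorten paths unless full_paths is set) and
 * process the data section described by the file header.
 */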
int perf_session__process_events(struct perf_session *self,
				 struct perf_event_ops *ops)
{
	int err;

	if (perf_session__register_idle_thread(self) == NULL)
		return -ENOMEM;

	if (!symbol_conf.full_paths) {
		char bf[PATH_MAX];

		if (getcwd(bf, sizeof(bf)) == NULL) {
			err = -errno;
out_getcwd_err:
			pr_err("failed to get the current directory\n");
			goto out_err;
		}
		self->cwd = strdup(bf);
		if (self->cwd == NULL) {
			err = -ENOMEM;
			goto out_getcwd_err;
		}
		self->cwdlen = strlen(self->cwd);
	}

	err = __perf_session__process_events(self, self->header.data_offset,
					     self->header.data_size,
					     self->size, ops);
out_err:
	return err;
}

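/* Check that the session recorded raw trace samples (PERF_SAMPLE_RAW). */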
bool perf_session__has_traces(struct perf_session *self, const char *msg)
{
	if (!(self->sample_type & PERF_SAMPLE_RAW)) {
		pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
		return false;
	}

	return true;
}

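/*
 * Record a kallsyms reference symbol and its address in the session and hook
 * it into every vmlinux map, so the kernel maps can later be relocated.  The
 * copied name is truncated at the first ']', if any.
 */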
int perf_session__set_kallsyms_ref_reloc_sym(struct perf_session *self,
					     const char *symbol_name,
					     u64 addr)
{
	char *bracket;
	enum map_type i;

	self->ref_reloc_sym.name = strdup(symbol_name);
	if (self->ref_reloc_sym.name == NULL)
		return -ENOMEM;

	bracket = strchr(self->ref_reloc_sym.name, ']');
	if (bracket)
		*bracket = '\0';

	self->ref_reloc_sym.addr = addr;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct kmap *kmap = map__kmap(self->vmlinux_maps[i]);
		kmap->ref_reloc_sym = &self->ref_reloc_sym;
	}

	return 0;
}