/* session.c — perf session handling */
  1. #include <linux/kernel.h>
  2. #include <byteswap.h>
  3. #include <unistd.h>
  4. #include <sys/types.h>
  5. #include "session.h"
  6. #include "sort.h"
  7. #include "util.h"
  8. static int perf_session__open(struct perf_session *self, bool force)
  9. {
  10. struct stat input_stat;
  11. self->fd = open(self->filename, O_RDONLY);
  12. if (self->fd < 0) {
  13. pr_err("failed to open file: %s", self->filename);
  14. if (!strcmp(self->filename, "perf.data"))
  15. pr_err(" (try 'perf record' first)");
  16. pr_err("\n");
  17. return -errno;
  18. }
  19. if (fstat(self->fd, &input_stat) < 0)
  20. goto out_close;
  21. if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
  22. pr_err("file %s not owned by current user or root\n",
  23. self->filename);
  24. goto out_close;
  25. }
  26. if (!input_stat.st_size) {
  27. pr_info("zero-sized file (%s), nothing to do!\n",
  28. self->filename);
  29. goto out_close;
  30. }
  31. if (perf_header__read(&self->header, self->fd) < 0) {
  32. pr_err("incompatible file format");
  33. goto out_close;
  34. }
  35. self->size = input_stat.st_size;
  36. return 0;
  37. out_close:
  38. close(self->fd);
  39. self->fd = -1;
  40. return -1;
  41. }
  42. struct perf_session *perf_session__new(const char *filename, int mode, bool force)
  43. {
  44. size_t len = filename ? strlen(filename) + 1 : 0;
  45. struct perf_session *self = zalloc(sizeof(*self) + len);
  46. if (self == NULL)
  47. goto out;
  48. if (perf_header__init(&self->header) < 0)
  49. goto out_free;
  50. memcpy(self->filename, filename, len);
  51. self->threads = RB_ROOT;
  52. self->last_match = NULL;
  53. self->mmap_window = 32;
  54. self->cwd = NULL;
  55. self->cwdlen = 0;
  56. self->unknown_events = 0;
  57. map_groups__init(&self->kmaps);
  58. if (mode == O_RDONLY && perf_session__open(self, force) < 0)
  59. goto out_delete;
  60. self->sample_type = perf_header__sample_type(&self->header);
  61. out:
  62. return self;
  63. out_free:
  64. free(self);
  65. return NULL;
  66. out_delete:
  67. perf_session__delete(self);
  68. return NULL;
  69. }
  70. void perf_session__delete(struct perf_session *self)
  71. {
  72. perf_header__exit(&self->header);
  73. close(self->fd);
  74. free(self->cwd);
  75. free(self);
  76. }
  77. static bool symbol__match_parent_regex(struct symbol *sym)
  78. {
  79. if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
  80. return 1;
  81. return 0;
  82. }
/*
 * Walk a sample's callchain and resolve each ip to a symbol.
 *
 * When callchains are enabled, returns a calloc'ed array of chain->nr
 * symbol pointers (entries stay NULL where resolution failed) that the
 * caller owns; otherwise returns NULL.  *parent is set to the first
 * resolved symbol matching parent_regex when the parent sort key is in
 * use.  Exits the process on allocation failure.
 */
struct symbol **perf_session__resolve_callchain(struct perf_session *self,
						struct thread *thread,
						struct ip_callchain *chain,
						struct symbol **parent)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	struct symbol **syms = NULL;
	unsigned int i;

	if (symbol_conf.use_callchain) {
		syms = calloc(chain->nr, sizeof(*syms));
		if (!syms) {
			fprintf(stderr, "Can't allocate memory for symbols\n");
			exit(-1);
		}
	}

	for (i = 0; i < chain->nr; i++) {
		u64 ip = chain->ips[i];
		struct addr_location al;

		if (ip >= PERF_CONTEXT_MAX) {
			/*
			 * Not an address: a context marker that switches the
			 * cpumode used to resolve the ips that follow it.
			 */
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR; break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL; break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER; break;
			default:
				break;
			}
			continue;
		}

		thread__find_addr_location(thread, self, cpumode,
					   MAP__FUNCTION, ip, &al, NULL);
		if (al.sym != NULL) {
			if (sort__has_parent && !*parent &&
			    symbol__match_parent_regex(al.sym))
				*parent = al.sym;
			/*
			 * Without use_callchain only the parent lookup is
			 * wanted, so the first resolved symbol is enough.
			 */
			if (!symbol_conf.use_callchain)
				break;
			syms[i] = al.sym;
		}
	}

	return syms;
}
/*
 * Default no-op handler installed for every event type the tool did not
 * register a callback for; just notes the event in the dump output.
 */
static int process_event_stub(event_t *event __used,
			      struct perf_session *session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}
  133. static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
  134. {
  135. if (handler->sample == NULL)
  136. handler->sample = process_event_stub;
  137. if (handler->mmap == NULL)
  138. handler->mmap = process_event_stub;
  139. if (handler->comm == NULL)
  140. handler->comm = process_event_stub;
  141. if (handler->fork == NULL)
  142. handler->fork = process_event_stub;
  143. if (handler->exit == NULL)
  144. handler->exit = process_event_stub;
  145. if (handler->lost == NULL)
  146. handler->lost = process_event_stub;
  147. if (handler->read == NULL)
  148. handler->read = process_event_stub;
  149. if (handler->throttle == NULL)
  150. handler->throttle = process_event_stub;
  151. if (handler->unthrottle == NULL)
  152. handler->unthrottle = process_event_stub;
  153. }
/* Printable names for the PERF_RECORD_* types; slot 0 labels the total. */
static const char *event__name[] = {
	[0]			 = "TOTAL",
	[PERF_RECORD_MMAP]	 = "MMAP",
	[PERF_RECORD_LOST]	 = "LOST",
	[PERF_RECORD_COMM]	 = "COMM",
	[PERF_RECORD_EXIT]	 = "EXIT",
	[PERF_RECORD_THROTTLE]	 = "THROTTLE",
	[PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE",
	[PERF_RECORD_FORK]	 = "FORK",
	[PERF_RECORD_READ]	 = "READ",
	[PERF_RECORD_SAMPLE]	 = "SAMPLE",
};
  166. unsigned long event__total[PERF_RECORD_MAX];
  167. void event__print_totals(void)
  168. {
  169. int i;
  170. for (i = 0; i < PERF_RECORD_MAX; ++i)
  171. pr_info("%10s events: %10ld\n",
  172. event__name[i], event__total[i]);
  173. }
  174. void mem_bswap_64(void *src, int byte_size)
  175. {
  176. u64 *m = src;
  177. while (byte_size > 0) {
  178. *m = bswap_64(*m);
  179. byte_size -= sizeof(u64);
  180. ++m;
  181. }
  182. }
  183. static void event__all64_swap(event_t *self)
  184. {
  185. struct perf_event_header *hdr = &self->header;
  186. mem_bswap_64(hdr + 1, self->header.size - sizeof(*hdr));
  187. }
  188. static void event__comm_swap(event_t *self)
  189. {
  190. self->comm.pid = bswap_32(self->comm.pid);
  191. self->comm.tid = bswap_32(self->comm.tid);
  192. }
  193. static void event__mmap_swap(event_t *self)
  194. {
  195. self->mmap.pid = bswap_32(self->mmap.pid);
  196. self->mmap.tid = bswap_32(self->mmap.tid);
  197. self->mmap.start = bswap_64(self->mmap.start);
  198. self->mmap.len = bswap_64(self->mmap.len);
  199. self->mmap.pgoff = bswap_64(self->mmap.pgoff);
  200. }
  201. static void event__task_swap(event_t *self)
  202. {
  203. self->fork.pid = bswap_32(self->fork.pid);
  204. self->fork.tid = bswap_32(self->fork.tid);
  205. self->fork.ppid = bswap_32(self->fork.ppid);
  206. self->fork.ptid = bswap_32(self->fork.ptid);
  207. self->fork.time = bswap_64(self->fork.time);
  208. }
  209. static void event__read_swap(event_t *self)
  210. {
  211. self->read.pid = bswap_32(self->read.pid);
  212. self->read.tid = bswap_32(self->read.tid);
  213. self->read.value = bswap_64(self->read.value);
  214. self->read.time_enabled = bswap_64(self->read.time_enabled);
  215. self->read.time_running = bswap_64(self->read.time_running);
  216. self->read.id = bswap_64(self->read.id);
  217. }
/* Per event type byte-swap routine, used when the data file endianness
 * differs from the host's.  Entries left NULL need no swapping beyond
 * the header; the table has PERF_RECORD_MAX + 1 slots. */
typedef void (*event__swap_op)(event_t *self);

static event__swap_op event__swap_ops[] = {
	[PERF_RECORD_MMAP]   = event__mmap_swap,
	[PERF_RECORD_COMM]   = event__comm_swap,
	[PERF_RECORD_FORK]   = event__task_swap,
	[PERF_RECORD_EXIT]   = event__task_swap,
	[PERF_RECORD_LOST]   = event__all64_swap,
	[PERF_RECORD_READ]   = event__read_swap,
	[PERF_RECORD_SAMPLE] = event__all64_swap,
	[PERF_RECORD_MAX]    = NULL,
};
  229. static int perf_session__process_event(struct perf_session *self,
  230. event_t *event,
  231. struct perf_event_ops *ops,
  232. u64 offset, u64 head)
  233. {
  234. trace_event(event);
  235. if (event->header.type < PERF_RECORD_MAX) {
  236. dump_printf("%#Lx [%#x]: PERF_RECORD_%s",
  237. offset + head, event->header.size,
  238. event__name[event->header.type]);
  239. ++event__total[0];
  240. ++event__total[event->header.type];
  241. }
  242. if (self->header.needs_swap && event__swap_ops[event->header.type])
  243. event__swap_ops[event->header.type](event);
  244. switch (event->header.type) {
  245. case PERF_RECORD_SAMPLE:
  246. return ops->sample(event, self);
  247. case PERF_RECORD_MMAP:
  248. return ops->mmap(event, self);
  249. case PERF_RECORD_COMM:
  250. return ops->comm(event, self);
  251. case PERF_RECORD_FORK:
  252. return ops->fork(event, self);
  253. case PERF_RECORD_EXIT:
  254. return ops->exit(event, self);
  255. case PERF_RECORD_LOST:
  256. return ops->lost(event, self);
  257. case PERF_RECORD_READ:
  258. return ops->read(event, self);
  259. case PERF_RECORD_THROTTLE:
  260. return ops->throttle(event, self);
  261. case PERF_RECORD_UNTHROTTLE:
  262. return ops->unthrottle(event, self);
  263. default:
  264. self->unknown_events++;
  265. return -1;
  266. }
  267. }
  268. void perf_event_header__bswap(struct perf_event_header *self)
  269. {
  270. self->type = bswap_32(self->type);
  271. self->misc = bswap_16(self->misc);
  272. self->size = bswap_16(self->size);
  273. }
  274. int perf_header__read_build_ids(struct perf_header *self,
  275. int input, u64 offset, u64 size)
  276. {
  277. struct build_id_event bev;
  278. char filename[PATH_MAX];
  279. u64 limit = offset + size;
  280. int err = -1;
  281. while (offset < limit) {
  282. struct dso *dso;
  283. ssize_t len;
  284. struct list_head *head = &dsos__user;
  285. if (read(input, &bev, sizeof(bev)) != sizeof(bev))
  286. goto out;
  287. if (self->needs_swap)
  288. perf_event_header__bswap(&bev.header);
  289. len = bev.header.size - sizeof(bev);
  290. if (read(input, filename, len) != len)
  291. goto out;
  292. if (bev.header.misc & PERF_RECORD_MISC_KERNEL)
  293. head = &dsos__kernel;
  294. dso = __dsos__findnew(head, filename);
  295. if (dso != NULL) {
  296. dso__set_build_id(dso, &bev.build_id);
  297. if (head == &dsos__kernel && filename[0] == '[')
  298. dso->kernel = 1;
  299. }
  300. offset += bev.header.size;
  301. }
  302. err = 0;
  303. out:
  304. return err;
  305. }
  306. static struct thread *perf_session__register_idle_thread(struct perf_session *self)
  307. {
  308. struct thread *thread = perf_session__findnew(self, 0);
  309. if (thread == NULL || thread__set_comm(thread, "swapper")) {
  310. pr_err("problem inserting idle task.\n");
  311. thread = NULL;
  312. }
  313. return thread;
  314. }
/*
 * mmap the data section of the perf.data file in sliding windows of
 * mmap_window pages and feed every record to the matching ops-> handler.
 * Returns 0 once the data section is consumed, a negative errno-style
 * value on setup failure.
 */
int perf_session__process_events(struct perf_session *self,
				 struct perf_event_ops *ops)
{
	int err, mmap_prot, mmap_flags;
	u64 head, shift;
	u64 offset = 0;
	size_t page_size;
	event_t *event;
	uint32_t size;
	char *buf;

	if (perf_session__register_idle_thread(self) == NULL)
		return -ENOMEM;

	perf_event_ops__fill_defaults(ops);

	page_size = sysconf(_SC_PAGESIZE);

	head = self->header.data_offset;

	/* Cache the cwd so sample paths can later be shown relative to it. */
	if (!symbol_conf.full_paths) {
		char bf[PATH_MAX];

		if (getcwd(bf, sizeof(bf)) == NULL) {
			err = -errno;
out_getcwd_err:
			pr_err("failed to get the current directory\n");
			goto out_err;
		}
		self->cwd = strdup(bf);
		if (self->cwd == NULL) {
			err = -ENOMEM;
			goto out_getcwd_err;
		}
		self->cwdlen = strlen(self->cwd);
	}

	/* mmap offsets must be page aligned: move the page-aligned part of
	 * head into offset and keep the remainder in head. */
	shift = page_size * (head / page_size);
	offset += shift;
	head -= shift;

	mmap_prot = PROT_READ;
	mmap_flags = MAP_SHARED;

	/* Byte swapping happens in place, so map a private writable copy. */
	if (self->header.needs_swap) {
		mmap_prot |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, page_size * self->mmap_window, mmap_prot,
		   mmap_flags, self->fd, offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}
more:
	event = (event_t *)(buf + head);

	if (self->header.needs_swap)
		perf_event_header__bswap(&event->header);
	size = event->header.size;
	if (size == 0)
		size = 8;

	/* Event would cross the end of the window: slide the window so the
	 * whole record is mapped and retry. */
	if (head + event->header.size >= page_size * self->mmap_window) {
		int munmap_ret;

		shift = page_size * (head / page_size);

		munmap_ret = munmap(buf, page_size * self->mmap_window);
		assert(munmap_ret == 0);

		offset += shift;
		head -= shift;
		goto remap;
	}

	size = event->header.size;

	dump_printf("\n%#Lx [%#x]: event: %d\n",
		    offset + head, event->header.size, event->header.type);

	if (size == 0 ||
	    perf_session__process_event(self, event, ops, offset, head) < 0) {
		dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
			    offset + head, event->header.size,
			    event->header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	if (offset + head >= self->header.data_offset + self->header.data_size)
		goto done;

	if (offset + head < self->size)
		goto more;
done:
	err = 0;
out_err:
	return err;
}
  404. bool perf_session__has_traces(struct perf_session *self, const char *msg)
  405. {
  406. if (!(self->sample_type & PERF_SAMPLE_RAW)) {
  407. pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
  408. return false;
  409. }
  410. return true;
  411. }
  412. int perf_session__set_kallsyms_ref_reloc_sym(struct perf_session *self,
  413. const char *symbol_name,
  414. u64 addr)
  415. {
  416. char *bracket;
  417. self->ref_reloc_sym.name = strdup(symbol_name);
  418. if (self->ref_reloc_sym.name == NULL)
  419. return -ENOMEM;
  420. bracket = strchr(self->ref_reloc_sym.name, ']');
  421. if (bracket)
  422. *bracket = '\0';
  423. self->ref_reloc_sym.addr = addr;
  424. return 0;
  425. }
/* Translate a map-relative ip to an address, applying the relocation
 * offset stashed in map->pgoff by perf_session__reloc_vmlinux_maps(). */
static u64 map__reloc_map_ip(struct map *map, u64 ip)
{
	return ip + (s64)map->pgoff;
}
/* Inverse of map__reloc_map_ip(): strip the relocation offset again. */
static u64 map__reloc_unmap_ip(struct map *map, u64 ip)
{
	return ip - (s64)map->pgoff;
}
  434. void perf_session__reloc_vmlinux_maps(struct perf_session *self,
  435. u64 unrelocated_addr)
  436. {
  437. enum map_type type;
  438. s64 reloc = unrelocated_addr - self->ref_reloc_sym.addr;
  439. if (!reloc)
  440. return;
  441. for (type = 0; type < MAP__NR_TYPES; ++type) {
  442. struct map *map = self->vmlinux_maps[type];
  443. map->map_ip = map__reloc_map_ip;
  444. map->unmap_ip = map__reloc_unmap_ip;
  445. map->pgoff = reloc;
  446. }
  447. }