#define _LARGEFILE64_SOURCE
#define _FILE_OFFSET_BITS 64

#include <linux/kernel.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>

#include "session.h"
#include "sort.h"
#include "util.h"

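/*
 * Open and sanity-check a perf data file: it must be readable, owned by
 * the current user or root (unless force is set), non-empty and carry a
 * header we understand.  On success the file size is recorded in the
 * session.
 */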
static int perf_session__open(struct perf_session *self, bool force)
{
	struct stat input_stat;

	self->fd = open(self->filename, O_RDONLY|O_LARGEFILE);
	if (self->fd < 0) {
		pr_err("failed to open file: %s", self->filename);
		if (!strcmp(self->filename, "perf.data"))
			pr_err(" (try 'perf record' first)");
		pr_err("\n");
		return -errno;
	}

	if (fstat(self->fd, &input_stat) < 0)
		goto out_close;

	if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
		pr_err("file %s not owned by current user or root\n",
		       self->filename);
		goto out_close;
	}

	if (!input_stat.st_size) {
		pr_info("zero-sized file (%s), nothing to do!\n",
			self->filename);
		goto out_close;
	}

	if (perf_header__read(&self->header, self->fd) < 0) {
		pr_err("incompatible file format");
		goto out_close;
	}

	self->size = input_stat.st_size;
	return 0;

out_close:
	close(self->fd);
	self->fd = -1;
	return -1;
}

struct perf_session *perf_session__new(const char *filename, int mode, bool force)
{
	size_t len = filename ? strlen(filename) + 1 : 0;
	struct perf_session *self = zalloc(sizeof(*self) + len);

	if (self == NULL)
		goto out;

	if (perf_header__init(&self->header) < 0)
		goto out_free;

	memcpy(self->filename, filename, len);
	self->threads = RB_ROOT;
	self->last_match = NULL;
	self->mmap_window = 32;
	self->cwd = NULL;
	self->cwdlen = 0;
	self->unknown_events = 0;
	map_groups__init(&self->kmaps);

	if (mode == O_RDONLY) {
		if (perf_session__open(self, force) < 0)
			goto out_delete;
	} else if (mode == O_WRONLY) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(self) < 0)
			goto out_delete;
	}

	self->sample_type = perf_header__sample_type(&self->header);
out:
	return self;
out_free:
	free(self);
	return NULL;
out_delete:
	perf_session__delete(self);
	return NULL;
}

void perf_session__delete(struct perf_session *self)
{
	perf_header__exit(&self->header);
	close(self->fd);
	free(self->cwd);
	free(self);
}

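/*
 * Resolve a sampled callchain to symbols.  Entries with a value at or
 * above PERF_CONTEXT_MAX are not addresses but markers that switch the
 * cpumode (hypervisor, kernel or user) used to resolve the entries that
 * follow.  When a parent regex is active, the first matching symbol is
 * reported through *parent.
 */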
static bool symbol__match_parent_regex(struct symbol *sym)
{
	if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
		return 1;

	return 0;
}

struct symbol **perf_session__resolve_callchain(struct perf_session *self,
						struct thread *thread,
						struct ip_callchain *chain,
						struct symbol **parent)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	struct symbol **syms = NULL;
	unsigned int i;

	if (symbol_conf.use_callchain) {
		syms = calloc(chain->nr, sizeof(*syms));
		if (!syms) {
			fprintf(stderr, "Can't allocate memory for symbols\n");
			exit(-1);
		}
	}

	for (i = 0; i < chain->nr; i++) {
		u64 ip = chain->ips[i];
		struct addr_location al;

		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR; break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL; break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER; break;
			default:
				break;
			}
			continue;
		}

		thread__find_addr_location(thread, self, cpumode,
					   MAP__FUNCTION, ip, &al, NULL);
		if (al.sym != NULL) {
			if (sort__has_parent && !*parent &&
			    symbol__match_parent_regex(al.sym))
				*parent = al.sym;
			if (!symbol_conf.use_callchain)
				break;
			syms[i] = al.sym;
		}
	}

	return syms;
}

static int process_event_stub(event_t *event __used,
			      struct perf_session *session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
{
	if (handler->sample == NULL)
		handler->sample = process_event_stub;
	if (handler->mmap == NULL)
		handler->mmap = process_event_stub;
	if (handler->comm == NULL)
		handler->comm = process_event_stub;
	if (handler->fork == NULL)
		handler->fork = process_event_stub;
	if (handler->exit == NULL)
		handler->exit = process_event_stub;
	if (handler->lost == NULL)
		handler->lost = process_event_stub;
	if (handler->read == NULL)
		handler->read = process_event_stub;
	if (handler->throttle == NULL)
		handler->throttle = process_event_stub;
	if (handler->unthrottle == NULL)
		handler->unthrottle = process_event_stub;
}

static const char *event__name[] = {
	[0]			 = "TOTAL",
	[PERF_RECORD_MMAP]	 = "MMAP",
	[PERF_RECORD_LOST]	 = "LOST",
	[PERF_RECORD_COMM]	 = "COMM",
	[PERF_RECORD_EXIT]	 = "EXIT",
	[PERF_RECORD_THROTTLE]	 = "THROTTLE",
	[PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE",
	[PERF_RECORD_FORK]	 = "FORK",
	[PERF_RECORD_READ]	 = "READ",
	[PERF_RECORD_SAMPLE]	 = "SAMPLE",
};

unsigned long event__total[PERF_RECORD_MAX];

void event__print_totals(void)
{
	int i;

	for (i = 0; i < PERF_RECORD_MAX; ++i)
		pr_info("%10s events: %10ld\n",
			event__name[i], event__total[i]);
}

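/*
 * Byte-swap helpers, used when the data file was written on a machine of
 * the opposite endianness (header.needs_swap): each record type has its
 * fields swapped in place before being handed to the ops callbacks.
 */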
void mem_bswap_64(void *src, int byte_size)
{
	u64 *m = src;

	while (byte_size > 0) {
		*m = bswap_64(*m);
		byte_size -= sizeof(u64);
		++m;
	}
}

static void event__all64_swap(event_t *self)
{
	struct perf_event_header *hdr = &self->header;

	mem_bswap_64(hdr + 1, self->header.size - sizeof(*hdr));
}

static void event__comm_swap(event_t *self)
{
	self->comm.pid = bswap_32(self->comm.pid);
	self->comm.tid = bswap_32(self->comm.tid);
}

static void event__mmap_swap(event_t *self)
{
	self->mmap.pid	 = bswap_32(self->mmap.pid);
	self->mmap.tid	 = bswap_32(self->mmap.tid);
	self->mmap.start = bswap_64(self->mmap.start);
	self->mmap.len	 = bswap_64(self->mmap.len);
	self->mmap.pgoff = bswap_64(self->mmap.pgoff);
}

static void event__task_swap(event_t *self)
{
	self->fork.pid	= bswap_32(self->fork.pid);
	self->fork.tid	= bswap_32(self->fork.tid);
	self->fork.ppid = bswap_32(self->fork.ppid);
	self->fork.ptid = bswap_32(self->fork.ptid);
	self->fork.time = bswap_64(self->fork.time);
}

static void event__read_swap(event_t *self)
{
	self->read.pid		= bswap_32(self->read.pid);
	self->read.tid		= bswap_32(self->read.tid);
	self->read.value	= bswap_64(self->read.value);
	self->read.time_enabled = bswap_64(self->read.time_enabled);
	self->read.time_running = bswap_64(self->read.time_running);
	self->read.id		= bswap_64(self->read.id);
}

typedef void (*event__swap_op)(event_t *self);

static event__swap_op event__swap_ops[] = {
	[PERF_RECORD_MMAP]   = event__mmap_swap,
	[PERF_RECORD_COMM]   = event__comm_swap,
	[PERF_RECORD_FORK]   = event__task_swap,
	[PERF_RECORD_EXIT]   = event__task_swap,
	[PERF_RECORD_LOST]   = event__all64_swap,
	[PERF_RECORD_READ]   = event__read_swap,
	[PERF_RECORD_SAMPLE] = event__all64_swap,
	[PERF_RECORD_MAX]    = NULL,
};

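/*
 * Dispatch one record: account it in the per-type totals, byte-swap it
 * if the file needs it, then hand it to the matching ops callback.
 * Unknown record types are counted in unknown_events and reported as an
 * error to the caller.
 */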
static int perf_session__process_event(struct perf_session *self,
					event_t *event,
					struct perf_event_ops *ops,
					u64 offset, u64 head)
{
	trace_event(event);

	if (event->header.type < PERF_RECORD_MAX) {
		dump_printf("%#Lx [%#x]: PERF_RECORD_%s",
			    offset + head, event->header.size,
			    event__name[event->header.type]);
		++event__total[0];
		++event__total[event->header.type];
	}

	if (self->header.needs_swap && event__swap_ops[event->header.type])
		event__swap_ops[event->header.type](event);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		return ops->sample(event, self);
	case PERF_RECORD_MMAP:
		return ops->mmap(event, self);
	case PERF_RECORD_COMM:
		return ops->comm(event, self);
	case PERF_RECORD_FORK:
		return ops->fork(event, self);
	case PERF_RECORD_EXIT:
		return ops->exit(event, self);
	case PERF_RECORD_LOST:
		return ops->lost(event, self);
	case PERF_RECORD_READ:
		return ops->read(event, self);
	case PERF_RECORD_THROTTLE:
		return ops->throttle(event, self);
	case PERF_RECORD_UNTHROTTLE:
		return ops->unthrottle(event, self);
	default:
		self->unknown_events++;
		return -1;
	}
}

void perf_event_header__bswap(struct perf_event_header *self)
{
	self->type = bswap_32(self->type);
	self->misc = bswap_16(self->misc);
	self->size = bswap_16(self->size);
}

int perf_header__read_build_ids(struct perf_header *self,
				int input, u64 offset, u64 size)
{
	struct build_id_event bev;
	char filename[PATH_MAX];
	u64 limit = offset + size;
	int err = -1;

	while (offset < limit) {
		struct dso *dso;
		ssize_t len;
		struct list_head *head = &dsos__user;

		if (read(input, &bev, sizeof(bev)) != sizeof(bev))
			goto out;

		if (self->needs_swap)
			perf_event_header__bswap(&bev.header);

		len = bev.header.size - sizeof(bev);
		if (read(input, filename, len) != len)
			goto out;

		if (bev.header.misc & PERF_RECORD_MISC_KERNEL)
			head = &dsos__kernel;

		dso = __dsos__findnew(head, filename);
		if (dso != NULL) {
			dso__set_build_id(dso, &bev.build_id);
			if (head == &dsos__kernel && filename[0] == '[')
				dso->kernel = 1;
		}

		offset += bev.header.size;
	}
	err = 0;
out:
	return err;
}

static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
	struct thread *thread = perf_session__findnew(self, 0);

	if (thread == NULL || thread__set_comm(thread, "swapper")) {
		pr_err("problem inserting idle task.\n");
		thread = NULL;
	}

	return thread;
}

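/*
 * Main event loop: mmap the data section of the file in a sliding window
 * of mmap_window pages and walk it record by record, remapping whenever
 * the next record would cross the end of the current window.  Records
 * that cannot be parsed are skipped one u64 at a time in the hope of
 * resynchronising with the stream.
 */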
int perf_session__process_events(struct perf_session *self,
				 struct perf_event_ops *ops)
{
	int err, mmap_prot, mmap_flags;
	u64 head, shift;
	u64 offset = 0;
	size_t page_size;
	event_t *event;
	uint32_t size;
	char *buf;

	if (perf_session__register_idle_thread(self) == NULL)
		return -ENOMEM;

	perf_event_ops__fill_defaults(ops);

	page_size = sysconf(_SC_PAGESIZE);

	head = self->header.data_offset;

	if (!symbol_conf.full_paths) {
		char bf[PATH_MAX];

		if (getcwd(bf, sizeof(bf)) == NULL) {
			err = -errno;
out_getcwd_err:
			pr_err("failed to get the current directory\n");
			goto out_err;
		}
		self->cwd = strdup(bf);
		if (self->cwd == NULL) {
			err = -ENOMEM;
			goto out_getcwd_err;
		}
		self->cwdlen = strlen(self->cwd);
	}

	shift = page_size * (head / page_size);
	offset += shift;
	head -= shift;

	mmap_prot  = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (self->header.needs_swap) {
		mmap_prot  |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, page_size * self->mmap_window, mmap_prot,
		   mmap_flags, self->fd, offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}
more:
	event = (event_t *)(buf + head);

	if (self->header.needs_swap)
		perf_event_header__bswap(&event->header);
	size = event->header.size;
	if (size == 0)
		size = 8;

	if (head + event->header.size >= page_size * self->mmap_window) {
		int munmap_ret;

		shift = page_size * (head / page_size);

		munmap_ret = munmap(buf, page_size * self->mmap_window);
		assert(munmap_ret == 0);

		offset += shift;
		head -= shift;
		goto remap;
	}

	size = event->header.size;

	dump_printf("\n%#Lx [%#x]: event: %d\n",
		    offset + head, event->header.size, event->header.type);

	if (size == 0 ||
	    perf_session__process_event(self, event, ops, offset, head) < 0) {
		dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
			    offset + head, event->header.size,
			    event->header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	if (offset + head >= self->header.data_offset + self->header.data_size)
		goto done;

	if (offset + head < self->size)
		goto more;
done:
	err = 0;
out_err:
	return err;
}

bool perf_session__has_traces(struct perf_session *self, const char *msg)
{
	if (!(self->sample_type & PERF_SAMPLE_RAW)) {
		pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
		return false;
	}

	return true;
}

int perf_session__set_kallsyms_ref_reloc_sym(struct perf_session *self,
					     const char *symbol_name,
					     u64 addr)
{
	char *bracket;

	self->ref_reloc_sym.name = strdup(symbol_name);
	if (self->ref_reloc_sym.name == NULL)
		return -ENOMEM;

	bracket = strchr(self->ref_reloc_sym.name, ']');
	if (bracket)
		*bracket = '\0';

	self->ref_reloc_sym.addr = addr;
	return 0;
}

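/*
 * Adjust the kernel maps when the kernel was relocated relative to the
 * vmlinux used for symbol resolution: the delta between the unrelocated
 * address and the reference symbol address recorded above is stored in
 * map->pgoff and applied by the map_ip/unmap_ip overrides below.
 */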
static u64 map__reloc_map_ip(struct map *map, u64 ip)
{
	return ip + (s64)map->pgoff;
}

static u64 map__reloc_unmap_ip(struct map *map, u64 ip)
{
	return ip - (s64)map->pgoff;
}

void perf_session__reloc_vmlinux_maps(struct perf_session *self,
				      u64 unrelocated_addr)
{
	enum map_type type;
	s64 reloc = unrelocated_addr - self->ref_reloc_sym.addr;

	if (!reloc)
		return;

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct map *map = self->vmlinux_maps[type];

		map->map_ip   = map__reloc_map_ip;
		map->unmap_ip = map__reloc_unmap_ip;
		map->pgoff    = reloc;
	}
}