event.c

#include <linux/types.h>
#include "event.h"
#include "debug.h"
#include "session.h"
#include "sort.h"
#include "string.h"
#include "strlist.h"
#include "thread.h"

const char *event__name[] = {
	[0] = "TOTAL",
	[PERF_RECORD_MMAP] = "MMAP",
	[PERF_RECORD_LOST] = "LOST",
	[PERF_RECORD_COMM] = "COMM",
	[PERF_RECORD_EXIT] = "EXIT",
	[PERF_RECORD_THROTTLE] = "THROTTLE",
	[PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE",
	[PERF_RECORD_FORK] = "FORK",
	[PERF_RECORD_READ] = "READ",
	[PERF_RECORD_SAMPLE] = "SAMPLE",
	[PERF_RECORD_HEADER_ATTR] = "ATTR",
	[PERF_RECORD_HEADER_EVENT_TYPE] = "EVENT_TYPE",
	[PERF_RECORD_HEADER_TRACING_DATA] = "TRACING_DATA",
	[PERF_RECORD_HEADER_BUILD_ID] = "BUILD_ID",
};

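/*
 * Synthesize a PERF_RECORD_COMM event for @pid from the Name: and Tgid:
 * lines of /proc/<pid>/status.  With @full set, one COMM event is emitted
 * for every tid found in /proc/<pid>/task, otherwise only for @pid itself.
 * Returns the tgid, 0 if the task raced away, or -1 on a malformed file.
 */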
static pid_t event__synthesize_comm(pid_t pid, int full,
				    event__handler_t process,
				    struct perf_session *session)
{
	event_t ev;
	char filename[PATH_MAX];
	char bf[BUFSIZ];
	FILE *fp;
	size_t size = 0;
	DIR *tasks;
	struct dirent dirent, *next;
	pid_t tgid = 0;

	snprintf(filename, sizeof(filename), "/proc/%d/status", pid);

	fp = fopen(filename, "r");
	if (fp == NULL) {
out_race:
		/*
		 * We raced with a task exiting - just return:
		 */
		pr_debug("couldn't open %s\n", filename);
		return 0;
	}

	memset(&ev.comm, 0, sizeof(ev.comm));
	while (!ev.comm.comm[0] || !ev.comm.pid) {
		if (fgets(bf, sizeof(bf), fp) == NULL)
			goto out_failure;

		if (memcmp(bf, "Name:", 5) == 0) {
			char *name = bf + 5;
			while (*name && isspace(*name))
				++name;
			size = strlen(name) - 1;
			memcpy(ev.comm.comm, name, size++);
		} else if (memcmp(bf, "Tgid:", 5) == 0) {
			char *tgids = bf + 5;
			while (*tgids && isspace(*tgids))
				++tgids;
			tgid = ev.comm.pid = atoi(tgids);
		}
	}

	ev.comm.header.type = PERF_RECORD_COMM;
	size = ALIGN(size, sizeof(u64));
	ev.comm.header.size = sizeof(ev.comm) - (sizeof(ev.comm.comm) - size);

	if (!full) {
		ev.comm.tid = pid;
		process(&ev, session);
		goto out_fclose;
	}

	snprintf(filename, sizeof(filename), "/proc/%d/task", pid);

	tasks = opendir(filename);
	if (tasks == NULL)
		goto out_race;

	while (!readdir_r(tasks, &dirent, &next) && next) {
		char *end;
		pid = strtol(dirent.d_name, &end, 10);
		if (*end)
			continue;

		ev.comm.tid = pid;
		process(&ev, session);
	}
	closedir(tasks);

out_fclose:
	fclose(fp);
	return tgid;

out_failure:
	pr_warning("couldn't get COMM and pgid, malformed %s\n", filename);
	return -1;
}

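/*
 * Walk /proc/<pid>/maps and synthesize a PERF_RECORD_MMAP event for every
 * executable mapping (lines whose permission field contains 'x'), so that
 * samples from already running tasks can later be resolved to their DSOs.
 */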
static int event__synthesize_mmap_events(pid_t pid, pid_t tgid,
					 event__handler_t process,
					 struct perf_session *session)
{
	char filename[PATH_MAX];
	FILE *fp;

	snprintf(filename, sizeof(filename), "/proc/%d/maps", pid);

	fp = fopen(filename, "r");
	if (fp == NULL) {
		/*
		 * We raced with a task exiting - just return:
		 */
		pr_debug("couldn't open %s\n", filename);
		return -1;
	}

	while (1) {
		char bf[BUFSIZ], *pbf = bf;
		event_t ev = {
			.header = {
				.type = PERF_RECORD_MMAP,
				/*
				 * Just like the kernel, see __perf_event_mmap
				 * in kernel/perf_event.c
				 */
				.misc = PERF_RECORD_MISC_USER,
			},
		};
		int n;
		size_t size;

		if (fgets(bf, sizeof(bf), fp) == NULL)
			break;

		/* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
		n = hex2u64(pbf, &ev.mmap.start);
		if (n < 0)
			continue;
		pbf += n + 1;
		n = hex2u64(pbf, &ev.mmap.len);
		if (n < 0)
			continue;
		pbf += n + 3;
		if (*pbf == 'x') { /* vm_exec */
			u64 vm_pgoff;
			char *execname = strchr(bf, '/');

			/* Catch VDSO */
			if (execname == NULL)
				execname = strstr(bf, "[vdso]");

			if (execname == NULL)
				continue;

			pbf += 3;
			n = hex2u64(pbf, &vm_pgoff);
			/*
			 * The offset field in /proc/<pid>/maps is already a
			 * byte offset, which is what mmap.pgoff expects, so
			 * use it as-is.
			 */
			if (n >= 0)
				ev.mmap.pgoff = vm_pgoff;
			else
				ev.mmap.pgoff = 0;

			size = strlen(execname);
			execname[size - 1] = '\0'; /* Remove \n */
			memcpy(ev.mmap.filename, execname, size);
			size = ALIGN(size, sizeof(u64));
			/* the second field parsed above was the end address */
			ev.mmap.len -= ev.mmap.start;
			ev.mmap.header.size = (sizeof(ev.mmap) -
					       (sizeof(ev.mmap.filename) - size));
			ev.mmap.pid = tgid;
			ev.mmap.tid = pid;

			process(&ev, session);
		}
	}

	fclose(fp);
	return 0;
}

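/*
 * Synthesize MMAP events for the kernel module maps of @machine: walk the
 * MAP__FUNCTION map group and emit one event per map whose dso is not the
 * kernel itself.
 */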
int event__synthesize_modules(event__handler_t process,
			      struct perf_session *session,
			      struct machine *machine)
{
	struct rb_node *nd;
	struct map_groups *kmaps = &machine->kmaps;
	u16 misc;

	/*
	 * kernel uses 0 for user space maps, see kernel/perf_event.c
	 * __perf_event_mmap
	 */
	if (machine__is_host(machine))
		misc = PERF_RECORD_MISC_KERNEL;
	else
		misc = PERF_RECORD_MISC_GUEST_KERNEL;

	for (nd = rb_first(&kmaps->maps[MAP__FUNCTION]);
	     nd; nd = rb_next(nd)) {
		event_t ev;
		size_t size;
		struct map *pos = rb_entry(nd, struct map, rb_node);

		if (pos->dso->kernel)
			continue;

		size = ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
		memset(&ev, 0, sizeof(ev));
		ev.mmap.header.misc = misc;
		ev.mmap.header.type = PERF_RECORD_MMAP;
		ev.mmap.header.size = (sizeof(ev.mmap) -
				       (sizeof(ev.mmap.filename) - size));
		ev.mmap.start = pos->start;
		ev.mmap.len = pos->end - pos->start;
		ev.mmap.pid = machine->pid;

		memcpy(ev.mmap.filename, pos->dso->long_name,
		       pos->dso->long_name_len + 1);
		process(&ev, session);
	}

	return 0;
}

int event__synthesize_thread(pid_t pid, event__handler_t process,
			     struct perf_session *session)
{
	pid_t tgid = event__synthesize_comm(pid, 1, process, session);
	if (tgid == -1)
		return -1;
	return event__synthesize_mmap_events(pid, tgid, process, session);
}

void event__synthesize_threads(event__handler_t process,
			       struct perf_session *session)
{
	DIR *proc;
	struct dirent dirent, *next;

	proc = opendir("/proc");

	while (!readdir_r(proc, &dirent, &next) && next) {
		char *end;
		pid_t pid = strtol(dirent.d_name, &end, 10);

		if (*end) /* only interested in proper numerical dirents */
			continue;

		event__synthesize_thread(pid, process, session);
	}

	closedir(proc);
}

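/*
 * Used with kallsyms__parse() below to look up the address of a single named
 * symbol, the one event__synthesize_kernel_mmap() records as the kernel
 * relocation reference in mmap.pgoff.
 */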
struct process_symbol_args {
	const char *name;
	u64	    start;
};

static int find_symbol_cb(void *arg, const char *name, char type, u64 start)
{
	struct process_symbol_args *args = arg;

	/*
	 * Must be a function or at least an alias, as in PARISC64, where "_text" is
	 * an 'A' to the same address as "_stext".
	 */
	if (!(symbol_type__is_a(type, MAP__FUNCTION) ||
	      type == 'A') || strcmp(name, args->name))
		return 0;

	args->start = start;
	return 1;
}

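/*
 * Synthesize the MMAP event describing the running kernel: resolve
 * @symbol_name via kallsyms (the host's /proc/kallsyms or the guest's copy),
 * store its address in mmap.pgoff and the vmlinux map boundaries in
 * mmap.start/len, with the filename prefixed by the machine's mmap name.
 */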
int event__synthesize_kernel_mmap(event__handler_t process,
				  struct perf_session *session,
				  struct machine *machine,
				  const char *symbol_name)
{
	size_t size;
	const char *filename, *mmap_name;
	char path[PATH_MAX];
	char name_buff[PATH_MAX];
	struct map *map;

	event_t ev = {
		.header = {
			.type = PERF_RECORD_MMAP,
		},
	};
	/*
	 * We should get this from /sys/kernel/sections/.text, but till that is
	 * available use this, and after it is use this as a fallback for older
	 * kernels.
	 */
	struct process_symbol_args args = { .name = symbol_name, };

	mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff));
	if (machine__is_host(machine)) {
		/*
		 * kernel uses PERF_RECORD_MISC_USER for user space maps,
		 * see kernel/perf_event.c __perf_event_mmap
		 */
		ev.header.misc = PERF_RECORD_MISC_KERNEL;
		filename = "/proc/kallsyms";
	} else {
		ev.header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
		if (machine__is_default_guest(machine))
			filename = (char *) symbol_conf.default_guest_kallsyms;
		else {
			sprintf(path, "%s/proc/kallsyms", machine->root_dir);
			filename = path;
		}
	}

	if (kallsyms__parse(filename, &args, find_symbol_cb) <= 0)
		return -ENOENT;

	map = machine->vmlinux_maps[MAP__FUNCTION];
	size = snprintf(ev.mmap.filename, sizeof(ev.mmap.filename),
			"%s%s", mmap_name, symbol_name) + 1;
	size = ALIGN(size, sizeof(u64));
	ev.mmap.header.size = (sizeof(ev.mmap) -
			       (sizeof(ev.mmap.filename) - size));
	ev.mmap.pgoff = args.start;
	ev.mmap.start = map->start;
	ev.mmap.len = map->end - ev.mmap.start;
	ev.mmap.pid = machine->pid;

	return process(&ev, session);
}

static void thread__comm_adjust(struct thread *self)
{
	char *comm = self->comm;

	if (!symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    (!symbol_conf.comm_list ||
	     strlist__has_entry(symbol_conf.comm_list, comm))) {
		unsigned int slen = strlen(comm);

		if (slen > comms__col_width) {
			comms__col_width = slen;
			threads__col_width = slen + 6;
		}
	}
}

static int thread__set_comm_adjust(struct thread *self, const char *comm)
{
	int ret = thread__set_comm(self, comm);

	if (ret)
		return ret;

	thread__comm_adjust(self);

	return 0;
}

int event__process_comm(event_t *self, struct perf_session *session)
{
	struct thread *thread = perf_session__findnew(session, self->comm.pid);

	dump_printf(": %s:%d\n", self->comm.comm, self->comm.pid);

	if (thread == NULL || thread__set_comm_adjust(thread, self->comm.comm)) {
		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
		return -1;
	}

	return 0;
}

int event__process_lost(event_t *self, struct perf_session *session)
{
	dump_printf(": id:%Ld: lost:%Ld\n", self->lost.id, self->lost.lost);
	session->hists.stats.total_lost += self->lost.lost;
	return 0;
}

static void event_set_kernel_mmap_len(struct map **maps, event_t *self)
{
	maps[MAP__FUNCTION]->start = self->mmap.start;
	maps[MAP__FUNCTION]->end = self->mmap.start + self->mmap.len;
	/*
	 * Be a bit paranoid here, some perf.data file came with
	 * a zero sized synthesized MMAP event for the kernel.
	 */
	if (maps[MAP__FUNCTION]->end == 0)
		maps[MAP__FUNCTION]->end = ~0UL;
}

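/*
 * Handle a kernel-side MMAP record: filenames starting with '/' or '[' (and
 * not matching the kernel mmap prefix) are treated as modules and get a
 * "[name]" short name, while records matching the prefix create the kernel
 * maps and set the kallsyms reference relocation symbol from mmap.pgoff.
 */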
static int event__process_kernel_mmap(event_t *self,
				      struct perf_session *session)
{
	struct map *map;
	char kmmap_prefix[PATH_MAX];
	struct machine *machine;
	enum dso_kernel_type kernel_type;
	bool is_kernel_mmap;

	machine = perf_session__findnew_machine(session, self->mmap.pid);
	if (!machine) {
		pr_err("Can't find id %d's machine\n", self->mmap.pid);
		goto out_problem;
	}

	machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
	if (machine__is_host(machine))
		kernel_type = DSO_TYPE_KERNEL;
	else
		kernel_type = DSO_TYPE_GUEST_KERNEL;

	is_kernel_mmap = memcmp(self->mmap.filename,
				kmmap_prefix,
				strlen(kmmap_prefix)) == 0;
	if (self->mmap.filename[0] == '/' ||
	    (!is_kernel_mmap && self->mmap.filename[0] == '[')) {
		char short_module_name[1024];
		char *name, *dot;

		if (self->mmap.filename[0] == '/') {
			name = strrchr(self->mmap.filename, '/');
			if (name == NULL)
				goto out_problem;
			++name; /* skip / */
			dot = strrchr(name, '.');
			if (dot == NULL)
				goto out_problem;
			snprintf(short_module_name, sizeof(short_module_name),
				 "[%.*s]", (int)(dot - name), name);
			strxfrchar(short_module_name, '-', '_');
		} else
			strcpy(short_module_name, self->mmap.filename);

		map = machine__new_module(machine, self->mmap.start,
					  self->mmap.filename);
		if (map == NULL)
			goto out_problem;

		name = strdup(short_module_name);
		if (name == NULL)
			goto out_problem;

		map->dso->short_name = name;
		map->end = map->start + self->mmap.len;
	} else if (is_kernel_mmap) {
		const char *symbol_name = (self->mmap.filename +
					   strlen(kmmap_prefix));
		/*
		 * Should be there already, from the build-id table in
		 * the header.
		 */
		struct dso *kernel = __dsos__findnew(&machine->kernel_dsos,
						     kmmap_prefix);
		if (kernel == NULL)
			goto out_problem;

		kernel->kernel = kernel_type;
		if (__machine__create_kernel_maps(machine, kernel) < 0)
			goto out_problem;

		event_set_kernel_mmap_len(machine->vmlinux_maps, self);
		perf_session__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
							 symbol_name,
							 self->mmap.pgoff);
		if (machine__is_default_guest(machine)) {
			/*
			 * preload dso of guest kernel and modules
			 */
			dso__load(kernel, machine->vmlinux_maps[MAP__FUNCTION],
				  NULL);
		}
	}
	return 0;
out_problem:
	return -1;
}

int event__process_mmap(event_t *self, struct perf_session *session)
{
	struct machine *machine;
	struct thread *thread;
	struct map *map;
	u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	int ret = 0;

	dump_printf(" %d/%d: [%#Lx(%#Lx) @ %#Lx]: %s\n",
		    self->mmap.pid, self->mmap.tid, self->mmap.start,
		    self->mmap.len, self->mmap.pgoff, self->mmap.filename);

	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = event__process_kernel_mmap(self, session);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	machine = perf_session__find_host_machine(session);
	if (machine == NULL)
		goto out_problem;
	thread = perf_session__findnew(session, self->mmap.pid);
	map = map__new(&machine->user_dsos, self->mmap.start,
		       self->mmap.len, self->mmap.pgoff,
		       self->mmap.pid, self->mmap.filename,
		       MAP__FUNCTION, session->cwd, session->cwdlen);

	if (thread == NULL || map == NULL)
		goto out_problem;

	thread__insert_map(thread, map);
	return 0;

out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
	return 0;
}

int event__process_task(event_t *self, struct perf_session *session)
{
	struct thread *thread = perf_session__findnew(session, self->fork.pid);
	struct thread *parent = perf_session__findnew(session, self->fork.ppid);

	dump_printf("(%d:%d):(%d:%d)\n", self->fork.pid, self->fork.tid,
		    self->fork.ppid, self->fork.ptid);
	/*
	 * A thread clone will have the same PID for both parent and child.
	 */
	if (thread == parent)
		return 0;

	if (self->header.type == PERF_RECORD_EXIT)
		return 0;

	if (thread == NULL || parent == NULL ||
	    thread__fork(thread, parent) < 0) {
		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
		return -1;
	}

	return 0;
}

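/*
 * Pick the map group to search based on the sample's cpumode, tagging the
 * addr_location level as 'k' (host kernel), '.' (host user), 'g' (guest
 * kernel), 'u' (guest user, unsupported) or 'H' (hypervisor), and mark it
 * filtered when the corresponding host/guest side is not being profiled.
 */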
void thread__find_addr_map(struct thread *self,
			   struct perf_session *session, u8 cpumode,
			   enum map_type type, pid_t pid, u64 addr,
			   struct addr_location *al)
{
	struct map_groups *mg = &self->mg;
	struct machine *machine = NULL;

	al->thread = self;
	al->addr = addr;
	al->cpumode = cpumode;
	al->filtered = false;

	if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
		al->level = 'k';
		machine = perf_session__find_host_machine(session);
		if (machine == NULL) {
			al->map = NULL;
			return;
		}
		mg = &machine->kmaps;
	} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
		al->level = '.';
		machine = perf_session__find_host_machine(session);
	} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
		al->level = 'g';
		machine = perf_session__find_machine(session, pid);
		if (machine == NULL) {
			al->map = NULL;
			return;
		}
		mg = &machine->kmaps;
	} else {
		/*
		 * 'u' means guest os user space.
		 * TODO: We don't support guest user space. Might support it later.
		 */
		if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest)
			al->level = 'u';
		else
			al->level = 'H';
		al->map = NULL;

		if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
		     cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
		    !perf_guest)
			al->filtered = true;
		if ((cpumode == PERF_RECORD_MISC_USER ||
		     cpumode == PERF_RECORD_MISC_KERNEL) &&
		    !perf_host)
			al->filtered = true;

		return;
	}
try_again:
	al->map = map_groups__find(mg, type, al->addr);
	if (al->map == NULL) {
		/*
		 * If this is outside of all known maps, and is a negative
		 * address, try to look it up in the kernel dso, as it might be
		 * a vsyscall or vdso (which executes in user-mode).
		 *
		 * XXX This is nasty, we should have a symbol list in the
		 * "[vdso]" dso, but for now lets use the old trick of looking
		 * in the whole kernel symbol list.
		 */
		if ((long long)al->addr < 0 &&
		    cpumode == PERF_RECORD_MISC_KERNEL &&
		    machine && mg != &machine->kmaps) {
			mg = &machine->kmaps;
			goto try_again;
		}
	} else
		al->addr = al->map->map_ip(al->map, al->addr);
}

void thread__find_addr_location(struct thread *self,
				struct perf_session *session, u8 cpumode,
				enum map_type type, pid_t pid, u64 addr,
				struct addr_location *al,
				symbol_filter_t filter)
{
	thread__find_addr_map(self, session, cpumode, type, pid, addr, al);
	if (al->map != NULL)
		al->sym = map__find_symbol(al->map, al->addr, filter);
	else
		al->sym = NULL;
}

static void dso__calc_col_width(struct dso *self)
{
	if (!symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    (!symbol_conf.dso_list ||
	     strlist__has_entry(symbol_conf.dso_list, self->name))) {
		u16 slen = self->short_name_len;
		if (verbose)
			slen = self->long_name_len;
		if (dsos__col_width < slen)
			dsos__col_width = slen;
	}

	self->slen_calculated = 1;
}

int event__preprocess_sample(const event_t *self, struct perf_session *session,
			     struct addr_location *al, symbol_filter_t filter)
{
	u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread = perf_session__findnew(session, self->ip.pid);

	if (thread == NULL)
		return -1;

	if (symbol_conf.comm_list &&
	    !strlist__has_entry(symbol_conf.comm_list, thread->comm))
		goto out_filtered;

	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
	/*
	 * Have we already created the kernel maps for the host machine?
	 *
	 * This should have happened earlier, when we processed the kernel MMAP
	 * events, but for older perf.data files there was no such thing, so do
	 * it now.
	 */
	if (cpumode == PERF_RECORD_MISC_KERNEL &&
	    session->host_machine.vmlinux_maps[MAP__FUNCTION] == NULL)
		machine__create_kernel_maps(&session->host_machine);

	thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION,
			      self->ip.pid, self->ip.ip, al);
	dump_printf(" ...... dso: %s\n",
		    al->map ? al->map->dso->long_name :
			al->level == 'H' ? "[hypervisor]" : "<not found>");
	al->sym = NULL;

	if (al->map) {
		if (symbol_conf.dso_list &&
		    (!al->map || !al->map->dso ||
		     !(strlist__has_entry(symbol_conf.dso_list,
					  al->map->dso->short_name) ||
		       (al->map->dso->short_name != al->map->dso->long_name &&
			strlist__has_entry(symbol_conf.dso_list,
					   al->map->dso->long_name)))))
			goto out_filtered;
		/*
		 * We have to do this here as we may have a dso with no symbol
		 * hit that has a name longer than the ones with symbols
		 * sampled.
		 */
		if (!sort_dso.elide && !al->map->dso->slen_calculated)
			dso__calc_col_width(al->map->dso);

		al->sym = map__find_symbol(al->map, al->addr, filter);
	} else {
		const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

		if (dsos__col_width < unresolved_col_width &&
		    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
		    !symbol_conf.dso_list)
			dsos__col_width = unresolved_col_width;
	}

	if (symbol_conf.sym_list && al->sym &&
	    !strlist__has_entry(symbol_conf.sym_list, al->sym->name))
		goto out_filtered;

	return 0;

out_filtered:
	al->filtered = true;
	return 0;
}

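/*
 * Decode the variable-length body of a PERF_RECORD_SAMPLE: the fields appear
 * back to back in the order of the PERF_SAMPLE_* bits set in @type, so each
 * matched bit advances the cursor over the u64 (or pair of u32s) it consumed.
 */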
int event__parse_sample(event_t *event, u64 type, struct sample_data *data)
{
	u64 *array = event->sample.array;

	if (type & PERF_SAMPLE_IP) {
		data->ip = event->ip.ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u32 *p = (u32 *)array;
		data->pid = p[0];
		data->tid = p[1];
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		data->time = *array;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		data->addr = *array;
		array++;
	}

	data->id = -1ULL;
	if (type & PERF_SAMPLE_ID) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u32 *p = (u32 *)array;
		data->cpu = *p;
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		data->period = *array;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		pr_debug("PERF_SAMPLE_READ is unsupported for now\n");
		return -1;
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		data->callchain = (struct ip_callchain *)array;
		array += 1 + data->callchain->nr;
	}

	if (type & PERF_SAMPLE_RAW) {
		u32 *p = (u32 *)array;
		data->raw_size = *p;
		p++;
		data->raw_data = p;
	}

	return 0;
}