event.c

#include <linux/types.h>
#include "event.h"
#include "debug.h"
#include "session.h"
#include "sort.h"
#include "string.h"
#include "strlist.h"
#include "thread.h"

const char *event__name[] = {
	[0]					= "TOTAL",
	[PERF_RECORD_MMAP]			= "MMAP",
	[PERF_RECORD_LOST]			= "LOST",
	[PERF_RECORD_COMM]			= "COMM",
	[PERF_RECORD_EXIT]			= "EXIT",
	[PERF_RECORD_THROTTLE]			= "THROTTLE",
	[PERF_RECORD_UNTHROTTLE]		= "UNTHROTTLE",
	[PERF_RECORD_FORK]			= "FORK",
	[PERF_RECORD_READ]			= "READ",
	[PERF_RECORD_SAMPLE]			= "SAMPLE",
	[PERF_RECORD_HEADER_ATTR]		= "ATTR",
	[PERF_RECORD_HEADER_EVENT_TYPE]		= "EVENT_TYPE",
	[PERF_RECORD_HEADER_TRACING_DATA]	= "TRACING_DATA",
	[PERF_RECORD_HEADER_BUILD_ID]		= "BUILD_ID",
};

static struct sample_data synth_sample = {
	.pid	   = -1,
	.tid	   = -1,
	.time	   = -1,
	.stream_id = -1,
	.cpu	   = -1,
	.period	   = 1,
};
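
/*
 * Synthesize a PERF_RECORD_COMM event for @pid from /proc/<pid>/status
 * ("Name:" supplies the comm, "Tgid:" the thread group leader).  When @full
 * is set, one event is emitted per entry in /proc/<pid>/task.  Returns the
 * tgid, 0 if the task raced away, or -1 on a malformed status file.
 */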
static pid_t event__synthesize_comm(pid_t pid, int full,
				    event__handler_t process,
				    struct perf_session *session)
{
	event_t ev;
	char filename[PATH_MAX];
	char bf[BUFSIZ];
	FILE *fp;
	size_t size = 0;
	DIR *tasks;
	struct dirent dirent, *next;
	pid_t tgid = 0;

	snprintf(filename, sizeof(filename), "/proc/%d/status", pid);

	fp = fopen(filename, "r");
	if (fp == NULL) {
out_race:
		/*
		 * We raced with a task exiting - just return:
		 */
		pr_debug("couldn't open %s\n", filename);
		return 0;
	}

	memset(&ev.comm, 0, sizeof(ev.comm));
	while (!ev.comm.comm[0] || !ev.comm.pid) {
		if (fgets(bf, sizeof(bf), fp) == NULL)
			goto out_failure;

		if (memcmp(bf, "Name:", 5) == 0) {
			char *name = bf + 5;
			while (*name && isspace(*name))
				++name;
			size = strlen(name) - 1;
			memcpy(ev.comm.comm, name, size++);
		} else if (memcmp(bf, "Tgid:", 5) == 0) {
			char *tgids = bf + 5;
			while (*tgids && isspace(*tgids))
				++tgids;
			tgid = ev.comm.pid = atoi(tgids);
		}
	}

	ev.comm.header.type = PERF_RECORD_COMM;
	size = ALIGN(size, sizeof(u64));
	ev.comm.header.size = sizeof(ev.comm) - (sizeof(ev.comm.comm) - size);

	if (!full) {
		ev.comm.tid = pid;

		process(&ev, &synth_sample, session);
		goto out_fclose;
	}

	snprintf(filename, sizeof(filename), "/proc/%d/task", pid);

	tasks = opendir(filename);
	if (tasks == NULL)
		goto out_race;

	while (!readdir_r(tasks, &dirent, &next) && next) {
		char *end;
		pid = strtol(dirent.d_name, &end, 10);
		if (*end)
			continue;

		ev.comm.tid = pid;

		process(&ev, &synth_sample, session);
	}
	closedir(tasks);

out_fclose:
	fclose(fp);
	return tgid;

out_failure:
	pr_warning("couldn't get COMM and pgid, malformed %s\n", filename);
	return -1;
}
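
/*
 * Walk /proc/<pid>/maps and synthesize a PERF_RECORD_MMAP event for every
 * executable mapping that can be named (file-backed or "[vdso]").
 */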
static int event__synthesize_mmap_events(pid_t pid, pid_t tgid,
					 event__handler_t process,
					 struct perf_session *session)
{
	char filename[PATH_MAX];
	FILE *fp;

	snprintf(filename, sizeof(filename), "/proc/%d/maps", pid);

	fp = fopen(filename, "r");
	if (fp == NULL) {
		/*
		 * We raced with a task exiting - just return:
		 */
		pr_debug("couldn't open %s\n", filename);
		return -1;
	}

	while (1) {
		char bf[BUFSIZ], *pbf = bf;
		event_t ev = {
			.header = {
				.type = PERF_RECORD_MMAP,
				/*
				 * Just like the kernel, see __perf_event_mmap
				 * in kernel/perf_event.c
				 */
				.misc = PERF_RECORD_MISC_USER,
			},
		};
		int n;
		size_t size;

		if (fgets(bf, sizeof(bf), fp) == NULL)
			break;

		/* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
		n = hex2u64(pbf, &ev.mmap.start);
		if (n < 0)
			continue;
		pbf += n + 1;
		n = hex2u64(pbf, &ev.mmap.len);
		if (n < 0)
			continue;
		pbf += n + 3;
		if (*pbf == 'x') { /* vm_exec */
			char *execname = strchr(bf, '/');

			/* Catch VDSO */
			if (execname == NULL)
				execname = strstr(bf, "[vdso]");

			if (execname == NULL)
				continue;

			pbf += 3;
			n = hex2u64(pbf, &ev.mmap.pgoff);

			size = strlen(execname);
			execname[size - 1] = '\0'; /* Remove \n */
			memcpy(ev.mmap.filename, execname, size);
			size = ALIGN(size, sizeof(u64));
			ev.mmap.len -= ev.mmap.start;
			ev.mmap.header.size = (sizeof(ev.mmap) -
					       (sizeof(ev.mmap.filename) - size));
			ev.mmap.pid = tgid;
			ev.mmap.tid = pid;

			process(&ev, &synth_sample, session);
		}
	}

	fclose(fp);
	return 0;
}
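
/*
 * Synthesize one MMAP event per module map in the machine's MAP__FUNCTION
 * tree; maps backed by a kernel dso are skipped here and handled by
 * event__synthesize_kernel_mmap() instead.
 */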
int event__synthesize_modules(event__handler_t process,
			      struct perf_session *session,
			      struct machine *machine)
{
	struct rb_node *nd;
	struct map_groups *kmaps = &machine->kmaps;
	u16 misc;

	/*
	 * kernel uses 0 for user space maps, see kernel/perf_event.c
	 * __perf_event_mmap
	 */
	if (machine__is_host(machine))
		misc = PERF_RECORD_MISC_KERNEL;
	else
		misc = PERF_RECORD_MISC_GUEST_KERNEL;

	for (nd = rb_first(&kmaps->maps[MAP__FUNCTION]);
	     nd; nd = rb_next(nd)) {
		event_t ev;
		size_t size;
		struct map *pos = rb_entry(nd, struct map, rb_node);

		if (pos->dso->kernel)
			continue;

		size = ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
		memset(&ev, 0, sizeof(ev));
		ev.mmap.header.misc = misc;
		ev.mmap.header.type = PERF_RECORD_MMAP;
		ev.mmap.header.size = (sizeof(ev.mmap) -
				       (sizeof(ev.mmap.filename) - size));
		ev.mmap.start = pos->start;
		ev.mmap.len   = pos->end - pos->start;
		ev.mmap.pid   = machine->pid;

		memcpy(ev.mmap.filename, pos->dso->long_name,
		       pos->dso->long_name_len + 1);

		process(&ev, &synth_sample, session);
	}

	return 0;
}
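
/*
 * Synthesize COMM + MMAP events for one thread group, or (below) for every
 * numeric entry under /proc.
 */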
int event__synthesize_thread(pid_t pid, event__handler_t process,
			     struct perf_session *session)
{
	pid_t tgid = event__synthesize_comm(pid, 1, process, session);
	if (tgid == -1)
		return -1;
	return event__synthesize_mmap_events(pid, tgid, process, session);
}

void event__synthesize_threads(event__handler_t process,
			       struct perf_session *session)
{
	DIR *proc;
	struct dirent dirent, *next;

	proc = opendir("/proc");
	if (proc == NULL)	/* don't hand a NULL DIR * to readdir_r() */
		return;

	while (!readdir_r(proc, &dirent, &next) && next) {
		char *end;
		pid_t pid = strtol(dirent.d_name, &end, 10);

		if (*end) /* only interested in proper numerical dirents */
			continue;

		event__synthesize_thread(pid, process, session);
	}

	closedir(proc);
}
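
/*
 * kallsyms__parse() callback used by event__synthesize_kernel_mmap():
 * record the address of the requested reference symbol.
 */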
struct process_symbol_args {
	const char *name;
	u64	   start;
};

static int find_symbol_cb(void *arg, const char *name, char type, u64 start)
{
	struct process_symbol_args *args = arg;

	/*
	 * Must be a function or at least an alias, as in PARISC64, where "_text" is
	 * an 'A' to the same address as "_stext".
	 */
	if (!(symbol_type__is_a(type, MAP__FUNCTION) ||
	      type == 'A') || strcmp(name, args->name))
		return 0;

	args->start = start;
	return 1;
}
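
/*
 * Synthesize the MMAP event for the kernel text map: look up @symbol_name in
 * kallsyms (host, or the guest's root_dir/proc/kallsyms) and carry its
 * address in the event's pgoff, which the processing side records as the
 * kallsyms ref_reloc_sym.
 */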
int event__synthesize_kernel_mmap(event__handler_t process,
				  struct perf_session *session,
				  struct machine *machine,
				  const char *symbol_name)
{
	size_t size;
	const char *filename, *mmap_name;
	char path[PATH_MAX];
	char name_buff[PATH_MAX];
	struct map *map;

	event_t ev = {
		.header = {
			.type = PERF_RECORD_MMAP,
		},
	};
	/*
	 * We should get this from /sys/kernel/sections/.text, but till that is
	 * available use this, and after it is use this as a fallback for older
	 * kernels.
	 */
	struct process_symbol_args args = { .name = symbol_name, };

	mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff));
	if (machine__is_host(machine)) {
		/*
		 * kernel uses PERF_RECORD_MISC_USER for user space maps,
		 * see kernel/perf_event.c __perf_event_mmap
		 */
		ev.header.misc = PERF_RECORD_MISC_KERNEL;
		filename = "/proc/kallsyms";
	} else {
		ev.header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
		if (machine__is_default_guest(machine))
			filename = (char *) symbol_conf.default_guest_kallsyms;
		else {
			sprintf(path, "%s/proc/kallsyms", machine->root_dir);
			filename = path;
		}
	}

	if (kallsyms__parse(filename, &args, find_symbol_cb) <= 0)
		return -ENOENT;

	map = machine->vmlinux_maps[MAP__FUNCTION];
	size = snprintf(ev.mmap.filename, sizeof(ev.mmap.filename),
			"%s%s", mmap_name, symbol_name) + 1;
	size = ALIGN(size, sizeof(u64));
	ev.mmap.header.size = (sizeof(ev.mmap) -
			       (sizeof(ev.mmap.filename) - size));
	ev.mmap.pgoff = args.start;
	ev.mmap.start = map->start;
	ev.mmap.len   = map->end - ev.mmap.start;
	ev.mmap.pid   = machine->pid;

	return process(&ev, &synth_sample, session);
}
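
/*
 * Keep the COMM/THREAD report column widths up to date when a thread's comm
 * is (re)set, honouring the comm filter list and any explicit width setup.
 */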
static void thread__comm_adjust(struct thread *self, struct hists *hists)
{
	char *comm = self->comm;

	if (!symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    (!symbol_conf.comm_list ||
	     strlist__has_entry(symbol_conf.comm_list, comm))) {
		u16 slen = strlen(comm);

		if (hists__new_col_len(hists, HISTC_COMM, slen))
			hists__set_col_len(hists, HISTC_THREAD, slen + 6);
	}
}

static int thread__set_comm_adjust(struct thread *self, const char *comm,
				   struct hists *hists)
{
	int ret = thread__set_comm(self, comm);

	if (ret)
		return ret;

	thread__comm_adjust(self, hists);

	return 0;
}
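
/*
 * PERF_RECORD_COMM: find or create the thread and update its comm (and the
 * report column widths along with it).
 */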
int event__process_comm(event_t *self, struct sample_data *sample __used,
			struct perf_session *session)
{
	struct thread *thread = perf_session__findnew(session, self->comm.tid);

	dump_printf(": %s:%d\n", self->comm.comm, self->comm.tid);

	if (thread == NULL || thread__set_comm_adjust(thread, self->comm.comm,
						      &session->hists)) {
		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
		return -1;
	}

	return 0;
}

int event__process_lost(event_t *self, struct sample_data *sample __used,
			struct perf_session *session)
{
	dump_printf(": id:%Ld: lost:%Ld\n", self->lost.id, self->lost.lost);
	session->hists.stats.total_lost += self->lost.lost;
	return 0;
}

static void event_set_kernel_mmap_len(struct map **maps, event_t *self)
{
	maps[MAP__FUNCTION]->start = self->mmap.start;
	maps[MAP__FUNCTION]->end   = self->mmap.start + self->mmap.len;
	/*
	 * Be a bit paranoid here, some perf.data file came with
	 * a zero sized synthesized MMAP event for the kernel.
	 */
	if (maps[MAP__FUNCTION]->end == 0)
		maps[MAP__FUNCTION]->end = ~0ULL;
}
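
/*
 * Kernel-space MMAP events: module maps get a "[shortname]" dso and their
 * end address set, while the kernel text map (re)creates the machine's
 * kernel maps and records the ref_reloc_sym carried in the event's pgoff.
 */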
static int event__process_kernel_mmap(event_t *self,
				      struct perf_session *session)
{
	struct map *map;
	char kmmap_prefix[PATH_MAX];
	struct machine *machine;
	enum dso_kernel_type kernel_type;
	bool is_kernel_mmap;

	machine = perf_session__findnew_machine(session, self->mmap.pid);
	if (!machine) {
		pr_err("Can't find id %d's machine\n", self->mmap.pid);
		goto out_problem;
	}

	machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
	if (machine__is_host(machine))
		kernel_type = DSO_TYPE_KERNEL;
	else
		kernel_type = DSO_TYPE_GUEST_KERNEL;

	is_kernel_mmap = memcmp(self->mmap.filename,
				kmmap_prefix,
				strlen(kmmap_prefix)) == 0;
	if (self->mmap.filename[0] == '/' ||
	    (!is_kernel_mmap && self->mmap.filename[0] == '[')) {

		char short_module_name[1024];
		char *name, *dot;

		if (self->mmap.filename[0] == '/') {
			name = strrchr(self->mmap.filename, '/');
			if (name == NULL)
				goto out_problem;

			++name; /* skip / */
			dot = strrchr(name, '.');
			if (dot == NULL)
				goto out_problem;
			snprintf(short_module_name, sizeof(short_module_name),
				 "[%.*s]", (int)(dot - name), name);
			strxfrchar(short_module_name, '-', '_');
		} else
			strcpy(short_module_name, self->mmap.filename);

		map = machine__new_module(machine, self->mmap.start,
					  self->mmap.filename);
		if (map == NULL)
			goto out_problem;

		name = strdup(short_module_name);
		if (name == NULL)
			goto out_problem;

		map->dso->short_name = name;
		map->dso->sname_alloc = 1;
		map->end = map->start + self->mmap.len;
	} else if (is_kernel_mmap) {
		const char *symbol_name = (self->mmap.filename +
					   strlen(kmmap_prefix));
		/*
		 * Should be there already, from the build-id table in
		 * the header.
		 */
		struct dso *kernel = __dsos__findnew(&machine->kernel_dsos,
						     kmmap_prefix);
		if (kernel == NULL)
			goto out_problem;

		kernel->kernel = kernel_type;
		if (__machine__create_kernel_maps(machine, kernel) < 0)
			goto out_problem;

		event_set_kernel_mmap_len(machine->vmlinux_maps, self);
		perf_session__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
							 symbol_name,
							 self->mmap.pgoff);
		if (machine__is_default_guest(machine)) {
			/*
			 * preload dso of guest kernel and modules
			 */
			dso__load(kernel, machine->vmlinux_maps[MAP__FUNCTION],
				  NULL);
		}
	}
	return 0;
out_problem:
	return -1;
}
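
/*
 * PERF_RECORD_MMAP: kernel and guest-kernel mappings are handled by
 * event__process_kernel_mmap(); user mappings are turned into a map in the
 * owning thread's map groups.
 */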
int event__process_mmap(event_t *self, struct sample_data *sample __used,
			struct perf_session *session)
{
	struct machine *machine;
	struct thread *thread;
	struct map *map;
	u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	int ret = 0;

	dump_printf(" %d/%d: [%#Lx(%#Lx) @ %#Lx]: %s\n",
		    self->mmap.pid, self->mmap.tid, self->mmap.start,
		    self->mmap.len, self->mmap.pgoff, self->mmap.filename);

	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = event__process_kernel_mmap(self, session);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	machine = perf_session__find_host_machine(session);
	if (machine == NULL)
		goto out_problem;
	thread = perf_session__findnew(session, self->mmap.pid);
	if (thread == NULL)
		goto out_problem;
	map = map__new(&machine->user_dsos, self->mmap.start,
		       self->mmap.len, self->mmap.pgoff,
		       self->mmap.pid, self->mmap.filename,
		       MAP__FUNCTION);
	if (map == NULL)
		goto out_problem;

	thread__insert_map(thread, map);
	return 0;

out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
	return 0;
}
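
/*
 * PERF_RECORD_FORK/EXIT: link the new thread to its parent on fork, or
 * remove the thread from the session on exit.
 */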
int event__process_task(event_t *self, struct sample_data *sample __used,
			struct perf_session *session)
{
	struct thread *thread = perf_session__findnew(session, self->fork.tid);
	struct thread *parent = perf_session__findnew(session, self->fork.ptid);

	dump_printf("(%d:%d):(%d:%d)\n", self->fork.pid, self->fork.tid,
		    self->fork.ppid, self->fork.ptid);

	if (self->header.type == PERF_RECORD_EXIT) {
		perf_session__remove_thread(session, thread);
		return 0;
	}

	if (thread == NULL || parent == NULL ||
	    thread__fork(thread, parent) < 0) {
		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
		return -1;
	}

	return 0;
}
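
/*
 * Convenience dispatcher: route an event to the matching event__process_*()
 * handler based on its header type; unknown types are ignored.
 */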
int event__process(event_t *event, struct sample_data *sample,
		   struct perf_session *session)
{
	switch (event->header.type) {
	case PERF_RECORD_COMM:
		event__process_comm(event, sample, session);
		break;
	case PERF_RECORD_MMAP:
		event__process_mmap(event, sample, session);
		break;
	case PERF_RECORD_FORK:
	case PERF_RECORD_EXIT:
		event__process_task(event, sample, session);
		break;
	default:
		break;
	}

	return 0;
}
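
/*
 * Resolve @addr to a map: pick the host/guest and kernel/user map groups
 * from @cpumode, mark samples from unmonitored contexts as filtered, and
 * translate the address with the map's map_ip() on success.
 */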
void thread__find_addr_map(struct thread *self,
			   struct perf_session *session, u8 cpumode,
			   enum map_type type, pid_t pid, u64 addr,
			   struct addr_location *al)
{
	struct map_groups *mg = &self->mg;
	struct machine *machine = NULL;

	al->thread = self;
	al->addr = addr;
	al->cpumode = cpumode;
	al->filtered = false;

	if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
		al->level = 'k';
		machine = perf_session__find_host_machine(session);
		if (machine == NULL) {
			al->map = NULL;
			return;
		}
		mg = &machine->kmaps;
	} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
		al->level = '.';
		machine = perf_session__find_host_machine(session);
	} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
		al->level = 'g';
		machine = perf_session__find_machine(session, pid);
		if (machine == NULL) {
			al->map = NULL;
			return;
		}
		mg = &machine->kmaps;
	} else {
		/*
		 * 'u' means guest os user space.
		 * TODO: We don't support guest user space. Might support late.
		 */
		if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest)
			al->level = 'u';
		else
			al->level = 'H';
		al->map = NULL;

		if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
		     cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
		    !perf_guest)
			al->filtered = true;
		if ((cpumode == PERF_RECORD_MISC_USER ||
		     cpumode == PERF_RECORD_MISC_KERNEL) &&
		    !perf_host)
			al->filtered = true;

		return;
	}
try_again:
	al->map = map_groups__find(mg, type, al->addr);
	if (al->map == NULL) {
		/*
		 * If this is outside of all known maps, and is a negative
		 * address, try to look it up in the kernel dso, as it might be
		 * a vsyscall or vdso (which executes in user-mode).
		 *
		 * XXX This is nasty, we should have a symbol list in the
		 * "[vdso]" dso, but for now lets use the old trick of looking
		 * in the whole kernel symbol list.
		 */
		if ((long long)al->addr < 0 &&
		    cpumode == PERF_RECORD_MISC_KERNEL &&
		    machine && mg != &machine->kmaps) {
			mg = &machine->kmaps;
			goto try_again;
		}
	} else
		al->addr = al->map->map_ip(al->map, al->addr);
}

void thread__find_addr_location(struct thread *self,
				struct perf_session *session, u8 cpumode,
				enum map_type type, pid_t pid, u64 addr,
				struct addr_location *al,
				symbol_filter_t filter)
{
	thread__find_addr_map(self, session, cpumode, type, pid, addr, al);
	if (al->map != NULL)
		al->sym = map__find_symbol(al->map, al->addr, filter);
	else
		al->sym = NULL;
}

static void dso__calc_col_width(struct dso *self, struct hists *hists)
{
	if (!symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    (!symbol_conf.dso_list ||
	     strlist__has_entry(symbol_conf.dso_list, self->name))) {
		u16 slen = dso__name_len(self);
		hists__new_col_len(hists, HISTC_DSO, slen);
	}

	self->slen_calculated = 1;
}
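
/*
 * Common sample preprocessing: resolve the sample's thread, map and symbol
 * and apply the comm/dso/symbol filter lists, marking filtered samples in
 * al->filtered instead of dropping them.
 */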
int event__preprocess_sample(const event_t *self, struct perf_session *session,
			     struct addr_location *al, struct sample_data *data,
			     symbol_filter_t filter)
{
	u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread = perf_session__findnew(session, self->ip.pid);

	if (thread == NULL)
		return -1;

	if (symbol_conf.comm_list &&
	    !strlist__has_entry(symbol_conf.comm_list, thread->comm))
		goto out_filtered;

	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
	/*
	 * Have we already created the kernel maps for the host machine?
	 *
	 * This should have happened earlier, when we processed the kernel MMAP
	 * events, but for older perf.data files there was no such thing, so do
	 * it now.
	 */
	if (cpumode == PERF_RECORD_MISC_KERNEL &&
	    session->host_machine.vmlinux_maps[MAP__FUNCTION] == NULL)
		machine__create_kernel_maps(&session->host_machine);

	thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION,
			      self->ip.pid, self->ip.ip, al);
	dump_printf(" ...... dso: %s\n",
		    al->map ? al->map->dso->long_name :
			al->level == 'H' ? "[hypervisor]" : "<not found>");
	al->sym = NULL;
	al->cpu = data->cpu;

	if (al->map) {
		if (symbol_conf.dso_list &&
		    (!al->map || !al->map->dso ||
		     !(strlist__has_entry(symbol_conf.dso_list,
					  al->map->dso->short_name) ||
		       (al->map->dso->short_name != al->map->dso->long_name &&
			strlist__has_entry(symbol_conf.dso_list,
					   al->map->dso->long_name)))))
			goto out_filtered;
		/*
		 * We have to do this here as we may have a dso with no symbol
		 * hit that has a name longer than the ones with symbols
		 * sampled.
		 */
		if (!sort_dso.elide && !al->map->dso->slen_calculated)
			dso__calc_col_width(al->map->dso, &session->hists);

		al->sym = map__find_symbol(al->map, al->addr, filter);
	} else {
		const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

		if (hists__col_len(&session->hists, HISTC_DSO) < unresolved_col_width &&
		    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
		    !symbol_conf.dso_list)
			hists__set_col_len(&session->hists, HISTC_DSO,
					   unresolved_col_width);
	}

	if (symbol_conf.sym_list && al->sym &&
	    !strlist__has_entry(symbol_conf.sym_list, al->sym->name))
		goto out_filtered;

	return 0;

out_filtered:
	al->filtered = true;
	return 0;
}
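
/*
 * Decode the variable-sized body of a PERF_RECORD_SAMPLE according to the
 * @type bitmask (the event's attr.sample_type); the fields are consumed in
 * the order the perf ABI lays them out.
 */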
int event__parse_sample(const event_t *event, u64 type, struct sample_data *data)
{
	const u64 *array = event->sample.array;

	if (type & PERF_SAMPLE_IP) {
		data->ip = event->ip.ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u32 *p = (u32 *)array;
		data->pid = p[0];
		data->tid = p[1];
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		data->time = *array;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		data->addr = *array;
		array++;
	}

	data->id = -1ULL;
	if (type & PERF_SAMPLE_ID) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u32 *p = (u32 *)array;
		data->cpu = *p;
		array++;
	} else
		data->cpu = -1;

	if (type & PERF_SAMPLE_PERIOD) {
		data->period = *array;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		pr_debug("PERF_SAMPLE_READ is unsupported for now\n");
		return -1;
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		data->callchain = (struct ip_callchain *)array;
		array += 1 + data->callchain->nr;
	}

	if (type & PERF_SAMPLE_RAW) {
		u32 *p = (u32 *)array;
		data->raw_size = *p;
		p++;
		data->raw_data = p;
	}

	return 0;
}