event.c

#include <linux/types.h>
#include "event.h"
#include "debug.h"
#include "sort.h"
#include "string.h"
#include "strlist.h"
#include "thread.h"
#include "thread_map.h"

static const char *perf_event__names[] = {
	[0]					= "TOTAL",
	[PERF_RECORD_MMAP]			= "MMAP",
	[PERF_RECORD_LOST]			= "LOST",
	[PERF_RECORD_COMM]			= "COMM",
	[PERF_RECORD_EXIT]			= "EXIT",
	[PERF_RECORD_THROTTLE]			= "THROTTLE",
	[PERF_RECORD_UNTHROTTLE]		= "UNTHROTTLE",
	[PERF_RECORD_FORK]			= "FORK",
	[PERF_RECORD_READ]			= "READ",
	[PERF_RECORD_SAMPLE]			= "SAMPLE",
	[PERF_RECORD_HEADER_ATTR]		= "ATTR",
	[PERF_RECORD_HEADER_EVENT_TYPE]		= "EVENT_TYPE",
	[PERF_RECORD_HEADER_TRACING_DATA]	= "TRACING_DATA",
	[PERF_RECORD_HEADER_BUILD_ID]		= "BUILD_ID",
	[PERF_RECORD_FINISHED_ROUND]		= "FINISHED_ROUND",
};

const char *perf_event__name(unsigned int id)
{
	if (id >= ARRAY_SIZE(perf_event__names))
		return "INVALID";
	if (!perf_event__names[id])
		return "UNKNOWN";
	return perf_event__names[id];
}

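/*
 * Dummy sample passed to the ->process callback for synthesized events,
 * which carry no real sample data of their own.
 */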
static struct perf_sample synth_sample = {
	.pid	   = -1,
	.tid	   = -1,
	.time	   = -1,
	.stream_id = -1,
	.cpu	   = -1,
	.period	   = 1,
};

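/*
 * Build a PERF_RECORD_COMM event for 'pid' by parsing the Name: and Tgid:
 * lines of /proc/<pid>/status.  When 'full' is set, one event is emitted
 * for every thread listed in /proc/<pid>/task, otherwise only for 'pid'
 * itself.  Returns the tgid, or 0 if the task raced away.
 */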
static pid_t perf_event__synthesize_comm(struct perf_tool *tool,
					 union perf_event *event, pid_t pid,
					 int full, perf_event__handler_t process,
					 struct machine *machine)
{
	char filename[PATH_MAX];
	char bf[BUFSIZ];
	FILE *fp;
	size_t size = 0;
	DIR *tasks;
	struct dirent dirent, *next;
	pid_t tgid = 0;

	snprintf(filename, sizeof(filename), "/proc/%d/status", pid);

	fp = fopen(filename, "r");
	if (fp == NULL) {
out_race:
		/*
		 * We raced with a task exiting - just return:
		 */
		pr_debug("couldn't open %s\n", filename);
		return 0;
	}

	memset(&event->comm, 0, sizeof(event->comm));

	while (!event->comm.comm[0] || !event->comm.pid) {
		if (fgets(bf, sizeof(bf), fp) == NULL) {
			pr_warning("couldn't get COMM and pgid, malformed %s\n", filename);
			goto out;
		}

		if (memcmp(bf, "Name:", 5) == 0) {
			char *name = bf + 5;
			while (*name && isspace(*name))
				++name;
			size = strlen(name) - 1;
			memcpy(event->comm.comm, name, size++);
		} else if (memcmp(bf, "Tgid:", 5) == 0) {
			char *tgids = bf + 5;
			while (*tgids && isspace(*tgids))
				++tgids;
			tgid = event->comm.pid = atoi(tgids);
		}
	}

	event->comm.header.type = PERF_RECORD_COMM;
	size = ALIGN(size, sizeof(u64));
	memset(event->comm.comm + size, 0, machine->id_hdr_size);
	event->comm.header.size = (sizeof(event->comm) -
				   (sizeof(event->comm.comm) - size) +
				   machine->id_hdr_size);
	if (!full) {
		event->comm.tid = pid;

		process(tool, event, &synth_sample, machine);
		goto out;
	}

	snprintf(filename, sizeof(filename), "/proc/%d/task", pid);

	tasks = opendir(filename);
	if (tasks == NULL)
		goto out_race;

	while (!readdir_r(tasks, &dirent, &next) && next) {
		char *end;
		pid = strtol(dirent.d_name, &end, 10);
		if (*end)
			continue;

		event->comm.tid = pid;

		process(tool, event, &synth_sample, machine);
	}

	closedir(tasks);
out:
	fclose(fp);

	return tgid;
}

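/*
 * Parse /proc/<pid>/maps and emit a PERF_RECORD_MMAP event for every
 * executable mapping found there, so that mappings created before the
 * record session can still be resolved.
 */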
static int perf_event__synthesize_mmap_events(struct perf_tool *tool,
					      union perf_event *event,
					      pid_t pid, pid_t tgid,
					      perf_event__handler_t process,
					      struct machine *machine)
{
	char filename[PATH_MAX];
	FILE *fp;

	snprintf(filename, sizeof(filename), "/proc/%d/maps", pid);

	fp = fopen(filename, "r");
	if (fp == NULL) {
		/*
		 * We raced with a task exiting - just return:
		 */
		pr_debug("couldn't open %s\n", filename);
		return -1;
	}

	event->header.type = PERF_RECORD_MMAP;
	/*
	 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
	 */
	event->header.misc = PERF_RECORD_MISC_USER;

	while (1) {
		char bf[BUFSIZ], *pbf = bf;
		int n;
		size_t size;

		if (fgets(bf, sizeof(bf), fp) == NULL)
			break;

		/* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
		n = hex2u64(pbf, &event->mmap.start);
		if (n < 0)
			continue;
		pbf += n + 1;
		n = hex2u64(pbf, &event->mmap.len);
		if (n < 0)
			continue;
		pbf += n + 3;
		if (*pbf == 'x') { /* vm_exec */
			char anonstr[] = "//anon\n";
			char *execname = strchr(bf, '/');

			/* Catch VDSO */
			if (execname == NULL)
				execname = strstr(bf, "[vdso]");

			/* Catch anonymous mmaps */
			if ((execname == NULL) && !strstr(bf, "["))
				execname = anonstr;

			if (execname == NULL)
				continue;

			pbf += 3;
			n = hex2u64(pbf, &event->mmap.pgoff);

			size = strlen(execname);
			execname[size - 1] = '\0'; /* Remove \n */
			memcpy(event->mmap.filename, execname, size);
			size = ALIGN(size, sizeof(u64));
			event->mmap.len -= event->mmap.start;
			event->mmap.header.size = (sizeof(event->mmap) -
						   (sizeof(event->mmap.filename) - size));
			memset(event->mmap.filename + size, 0, machine->id_hdr_size);
			event->mmap.header.size += machine->id_hdr_size;
			event->mmap.pid = tgid;
			event->mmap.tid = pid;

			process(tool, event, &synth_sample, machine);
		}
	}

	fclose(fp);
	return 0;
}

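/*
 * Walk the machine's MAP__FUNCTION maps and emit a PERF_RECORD_MMAP for
 * each kernel module; the kernel map itself is skipped here and handled
 * by perf_event__synthesize_kernel_mmap().
 */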
int perf_event__synthesize_modules(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine)
{
	struct rb_node *nd;
	struct map_groups *kmaps = &machine->kmaps;
	union perf_event *event = zalloc((sizeof(event->mmap) +
					  machine->id_hdr_size));
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for kernel modules\n");
		return -1;
	}

	event->header.type = PERF_RECORD_MMAP;

	/*
	 * kernel uses 0 for user space maps, see kernel/perf_event.c
	 * __perf_event_mmap
	 */
	if (machine__is_host(machine))
		event->header.misc = PERF_RECORD_MISC_KERNEL;
	else
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;

	for (nd = rb_first(&kmaps->maps[MAP__FUNCTION]);
	     nd; nd = rb_next(nd)) {
		size_t size;
		struct map *pos = rb_entry(nd, struct map, rb_node);

		if (pos->dso->kernel)
			continue;

		size = ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
		event->mmap.header.type = PERF_RECORD_MMAP;
		event->mmap.header.size = (sizeof(event->mmap) -
					   (sizeof(event->mmap.filename) - size));
		memset(event->mmap.filename + size, 0, machine->id_hdr_size);
		event->mmap.header.size += machine->id_hdr_size;
		event->mmap.start = pos->start;
		event->mmap.len	  = pos->end - pos->start;
		event->mmap.pid	  = machine->pid;

		memcpy(event->mmap.filename, pos->dso->long_name,
		       pos->dso->long_name_len + 1);
		process(tool, event, &synth_sample, machine);
	}

	free(event);
	return 0;
}

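/*
 * Synthesize both the COMM and MMAP events for a single thread.
 */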
static int __event__synthesize_thread(union perf_event *comm_event,
				      union perf_event *mmap_event,
				      pid_t pid, perf_event__handler_t process,
				      struct perf_tool *tool,
				      struct machine *machine)
{
	pid_t tgid = perf_event__synthesize_comm(tool, comm_event, pid, 1,
						 process, machine);
	if (tgid == -1)
		return -1;
	return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
						  process, machine);
}

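/*
 * Synthesize COMM and MMAP events for every thread in the given thread_map.
 */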
int perf_event__synthesize_thread_map(struct perf_tool *tool,
				      struct thread_map *threads,
				      perf_event__handler_t process,
				      struct machine *machine)
{
	union perf_event *comm_event, *mmap_event;
	int err = -1, thread;

	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap) + machine->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	err = 0;
	for (thread = 0; thread < threads->nr; ++thread) {
		if (__event__synthesize_thread(comm_event, mmap_event,
					       threads->map[thread],
					       process, tool, machine)) {
			err = -1;
			break;
		}
	}
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}

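/*
 * Scan /proc and synthesize COMM and MMAP events for every task already
 * running on the system, as needed for system-wide sessions.
 */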
int perf_event__synthesize_threads(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine)
{
	DIR *proc;
	struct dirent dirent, *next;
	union perf_event *comm_event, *mmap_event;
	int err = -1;

	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap) + machine->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	proc = opendir("/proc");
	if (proc == NULL)
		goto out_free_mmap;

	while (!readdir_r(proc, &dirent, &next) && next) {
		char *end;
		pid_t pid = strtol(dirent.d_name, &end, 10);

		if (*end) /* only interested in proper numerical dirents */
			continue;

		__event__synthesize_thread(comm_event, mmap_event, pid,
					   process, tool, machine);
	}

	closedir(proc);
	err = 0;
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}

struct process_symbol_args {
	const char *name;
	u64	    start;
};

static int find_symbol_cb(void *arg, const char *name, char type,
			  u64 start, u64 end __used)
{
	struct process_symbol_args *args = arg;

	/*
	 * Must be a function or at least an alias, as in PARISC64, where "_text" is
	 * an 'A' to the same address as "_stext".
	 */
	if (!(symbol_type__is_a(type, MAP__FUNCTION) ||
	      type == 'A') || strcmp(name, args->name))
		return 0;

	args->start = start;
	return 1;
}

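/*
 * Synthesize the PERF_RECORD_MMAP event for the kernel image itself, using
 * the address of 'symbol_name' as found in kallsyms for the pgoff field.
 */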
int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
				       perf_event__handler_t process,
				       struct machine *machine,
				       const char *symbol_name)
{
	size_t size;
	const char *filename, *mmap_name;
	char path[PATH_MAX];
	char name_buff[PATH_MAX];
	struct map *map;
	int err;
	/*
	 * We should get this from /sys/kernel/sections/.text, but till that is
	 * available use this, and after it is use this as a fallback for older
	 * kernels.
	 */
	struct process_symbol_args args = { .name = symbol_name, };
	union perf_event *event = zalloc((sizeof(event->mmap) +
					  machine->id_hdr_size));
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for the kernel\n");
		return -1;
	}

	mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff));
	if (machine__is_host(machine)) {
		/*
		 * kernel uses PERF_RECORD_MISC_USER for user space maps,
		 * see kernel/perf_event.c __perf_event_mmap
		 */
		event->header.misc = PERF_RECORD_MISC_KERNEL;
		filename = "/proc/kallsyms";
	} else {
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
		if (machine__is_default_guest(machine))
			filename = (char *) symbol_conf.default_guest_kallsyms;
		else {
			sprintf(path, "%s/proc/kallsyms", machine->root_dir);
			filename = path;
		}
	}

	if (kallsyms__parse(filename, &args, find_symbol_cb) <= 0) {
		free(event);	/* don't leak the event on the error path */
		return -ENOENT;
	}

	map = machine->vmlinux_maps[MAP__FUNCTION];
	size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
			"%s%s", mmap_name, symbol_name) + 1;
	size = ALIGN(size, sizeof(u64));
	event->mmap.header.type = PERF_RECORD_MMAP;
	event->mmap.header.size = (sizeof(event->mmap) -
			(sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
	event->mmap.pgoff = args.start;
	event->mmap.start = map->start;
	event->mmap.len	  = map->end - event->mmap.start;
	event->mmap.pid	  = machine->pid;

	err = process(tool, event, &synth_sample, machine);
	free(event);

	return err;
}

int perf_event__process_comm(struct perf_tool *tool __used,
			     union perf_event *event,
			     struct perf_sample *sample __used,
			     struct machine *machine)
{
	struct thread *thread = machine__findnew_thread(machine, event->comm.tid);

	dump_printf(": %s:%d\n", event->comm.comm, event->comm.tid);

	if (thread == NULL || thread__set_comm(thread, event->comm.comm)) {
		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
		return -1;
	}

	return 0;
}

int perf_event__process_lost(struct perf_tool *tool __used,
			     union perf_event *event,
			     struct perf_sample *sample __used,
			     struct machine *machine __used)
{
	dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
		    event->lost.id, event->lost.lost);
	return 0;
}

static void perf_event__set_kernel_mmap_len(union perf_event *event,
					    struct map **maps)
{
	maps[MAP__FUNCTION]->start = event->mmap.start;
	maps[MAP__FUNCTION]->end   = event->mmap.start + event->mmap.len;
	/*
	 * Be a bit paranoid here, some perf.data file came with
	 * a zero sized synthesized MMAP event for the kernel.
	 */
	if (maps[MAP__FUNCTION]->end == 0)
		maps[MAP__FUNCTION]->end = ~0ULL;
}

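/*
 * Handle MMAP events for kernel space: module maps get a new map with a
 * "[module]" style short name, while the main kernel map sets up the
 * machine's vmlinux maps and the kallsyms ref reloc symbol.
 */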
static int perf_event__process_kernel_mmap(struct perf_tool *tool __used,
					   union perf_event *event,
					   struct machine *machine)
{
	struct map *map;
	char kmmap_prefix[PATH_MAX];
	enum dso_kernel_type kernel_type;
	bool is_kernel_mmap;

	machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
	if (machine__is_host(machine))
		kernel_type = DSO_TYPE_KERNEL;
	else
		kernel_type = DSO_TYPE_GUEST_KERNEL;

	is_kernel_mmap = memcmp(event->mmap.filename,
				kmmap_prefix,
				strlen(kmmap_prefix)) == 0;
	if (event->mmap.filename[0] == '/' ||
	    (!is_kernel_mmap && event->mmap.filename[0] == '[')) {

		char short_module_name[1024];
		char *name, *dot;

		if (event->mmap.filename[0] == '/') {
			name = strrchr(event->mmap.filename, '/');
			if (name == NULL)
				goto out_problem;

			++name; /* skip / */
			dot = strrchr(name, '.');
			if (dot == NULL)
				goto out_problem;
			snprintf(short_module_name, sizeof(short_module_name),
				 "[%.*s]", (int)(dot - name), name);
			strxfrchar(short_module_name, '-', '_');
		} else
			strcpy(short_module_name, event->mmap.filename);

		map = machine__new_module(machine, event->mmap.start,
					  event->mmap.filename);
		if (map == NULL)
			goto out_problem;

		name = strdup(short_module_name);
		if (name == NULL)
			goto out_problem;

		map->dso->short_name = name;
		map->dso->sname_alloc = 1;
		map->end = map->start + event->mmap.len;
	} else if (is_kernel_mmap) {
		const char *symbol_name = (event->mmap.filename +
					   strlen(kmmap_prefix));
		/*
		 * Should be there already, from the build-id table in
		 * the header.
		 */
		struct dso *kernel = __dsos__findnew(&machine->kernel_dsos,
						     kmmap_prefix);
		if (kernel == NULL)
			goto out_problem;

		kernel->kernel = kernel_type;
		if (__machine__create_kernel_maps(machine, kernel) < 0)
			goto out_problem;

		perf_event__set_kernel_mmap_len(event, machine->vmlinux_maps);

		/*
		 * Avoid using a zero address (kptr_restrict) for the ref reloc
		 * symbol. Effectively having zero here means that at record
		 * time /proc/sys/kernel/kptr_restrict was non zero.
		 */
		if (event->mmap.pgoff != 0) {
			maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
							 symbol_name,
							 event->mmap.pgoff);
		}

		if (machine__is_default_guest(machine)) {
			/*
			 * preload dso of guest kernel and modules
			 */
			dso__load(kernel, machine->vmlinux_maps[MAP__FUNCTION],
				  NULL);
		}
	}
	return 0;
out_problem:
	return -1;
}

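/*
 * Handle an incoming PERF_RECORD_MMAP: kernel and guest-kernel maps are
 * passed to perf_event__process_kernel_mmap(), user maps are turned into
 * a new struct map and inserted into the owning thread.
 */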
int perf_event__process_mmap(struct perf_tool *tool,
			     union perf_event *event,
			     struct perf_sample *sample __used,
			     struct machine *machine)
{
	struct thread *thread;
	struct map *map;
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	int ret = 0;

	dump_printf(" %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %s\n",
		    event->mmap.pid, event->mmap.tid, event->mmap.start,
		    event->mmap.len, event->mmap.pgoff, event->mmap.filename);

	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = perf_event__process_kernel_mmap(tool, event, machine);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap.pid);
	if (thread == NULL)
		goto out_problem;
	map = map__new(&machine->user_dsos, event->mmap.start,
		       event->mmap.len, event->mmap.pgoff,
		       event->mmap.pid, event->mmap.filename,
		       MAP__FUNCTION);
	if (map == NULL)
		goto out_problem;

	thread__insert_map(thread, map);
	return 0;

out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
	return 0;
}

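/*
 * Handle PERF_RECORD_FORK and PERF_RECORD_EXIT: on exit the thread is
 * removed from the machine, on fork the child thread is set up from its
 * parent via thread__fork().
 */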
int perf_event__process_task(struct perf_tool *tool __used,
			     union perf_event *event,
			     struct perf_sample *sample __used,
			     struct machine *machine)
{
	struct thread *thread = machine__findnew_thread(machine, event->fork.tid);
	struct thread *parent = machine__findnew_thread(machine, event->fork.ptid);

	dump_printf("(%d:%d):(%d:%d)\n", event->fork.pid, event->fork.tid,
		    event->fork.ppid, event->fork.ptid);

	if (event->header.type == PERF_RECORD_EXIT) {
		machine__remove_thread(machine, thread);
		return 0;
	}

	if (thread == NULL || parent == NULL ||
	    thread__fork(thread, parent) < 0) {
		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
		return -1;
	}

	return 0;
}

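/*
 * Dispatch an event to the matching perf_event__process_*() handler based
 * on its header type.
 */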
int perf_event__process(struct perf_tool *tool, union perf_event *event,
			struct perf_sample *sample, struct machine *machine)
{
	switch (event->header.type) {
	case PERF_RECORD_COMM:
		perf_event__process_comm(tool, event, sample, machine);
		break;
	case PERF_RECORD_MMAP:
		perf_event__process_mmap(tool, event, sample, machine);
		break;
	case PERF_RECORD_FORK:
	case PERF_RECORD_EXIT:
		perf_event__process_task(tool, event, sample, machine);
		break;
	case PERF_RECORD_LOST:
		perf_event__process_lost(tool, event, sample, machine);
	default:
		break;
	}

	return 0;
}

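/*
 * Resolve 'addr' to a struct map for the given thread, picking the map
 * group (user vs. kernel, host vs. guest) from 'cpumode' and filling in
 * the addr_location accordingly.
 */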
void thread__find_addr_map(struct thread *self,
			   struct machine *machine, u8 cpumode,
			   enum map_type type, u64 addr,
			   struct addr_location *al)
{
	struct map_groups *mg = &self->mg;

	al->thread = self;
	al->addr = addr;
	al->cpumode = cpumode;
	al->filtered = false;

	if (machine == NULL) {
		al->map = NULL;
		return;
	}

	if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
		al->level = 'k';
		mg = &machine->kmaps;
	} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
		al->level = '.';
	} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
		al->level = 'g';
		mg = &machine->kmaps;
	} else {
		/*
		 * 'u' means guest os user space.
		 * TODO: We don't support guest user space. Might support late.
		 */
		if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest)
			al->level = 'u';
		else
			al->level = 'H';
		al->map = NULL;

		if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
		     cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
		    !perf_guest)
			al->filtered = true;
		if ((cpumode == PERF_RECORD_MISC_USER ||
		     cpumode == PERF_RECORD_MISC_KERNEL) &&
		    !perf_host)
			al->filtered = true;

		return;
	}
try_again:
	al->map = map_groups__find(mg, type, al->addr);
	if (al->map == NULL) {
		/*
		 * If this is outside of all known maps, and is a negative
		 * address, try to look it up in the kernel dso, as it might be
		 * a vsyscall or vdso (which executes in user-mode).
		 *
		 * XXX This is nasty, we should have a symbol list in the
		 * "[vdso]" dso, but for now lets use the old trick of looking
		 * in the whole kernel symbol list.
		 */
		if ((long long)al->addr < 0 &&
		    cpumode == PERF_RECORD_MISC_USER &&
		    machine && mg != &machine->kmaps) {
			mg = &machine->kmaps;
			goto try_again;
		}
	} else
		al->addr = al->map->map_ip(al->map, al->addr);
}

void thread__find_addr_location(struct thread *thread, struct machine *machine,
				u8 cpumode, enum map_type type, u64 addr,
				struct addr_location *al,
				symbol_filter_t filter)
{
	thread__find_addr_map(thread, machine, cpumode, type, addr, al);
	if (al->map != NULL)
		al->sym = map__find_symbol(al->map, al->addr, filter);
	else
		al->sym = NULL;
}

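/*
 * Resolve a sample's IP to thread, map and symbol, applying the comm, dso
 * and symbol filter lists from symbol_conf; filtered samples get
 * al->filtered set instead of being dropped.
 */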
int perf_event__preprocess_sample(const union perf_event *event,
				  struct machine *machine,
				  struct addr_location *al,
				  struct perf_sample *sample,
				  symbol_filter_t filter)
{
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread = machine__findnew_thread(machine, event->ip.pid);

	if (thread == NULL)
		return -1;

	if (symbol_conf.comm_list &&
	    !strlist__has_entry(symbol_conf.comm_list, thread->comm))
		goto out_filtered;

	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
	/*
	 * Have we already created the kernel maps for this machine?
	 *
	 * This should have happened earlier, when we processed the kernel MMAP
	 * events, but for older perf.data files there was no such thing, so do
	 * it now.
	 */
	if (cpumode == PERF_RECORD_MISC_KERNEL &&
	    machine->vmlinux_maps[MAP__FUNCTION] == NULL)
		machine__create_kernel_maps(machine);

	thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION,
			      event->ip.ip, al);
	dump_printf(" ...... dso: %s\n",
		    al->map ? al->map->dso->long_name :
			al->level == 'H' ? "[hypervisor]" : "<not found>");
	al->sym = NULL;
	al->cpu = sample->cpu;

	if (al->map) {
		if (symbol_conf.dso_list &&
		    (!al->map || !al->map->dso ||
		     !(strlist__has_entry(symbol_conf.dso_list,
					  al->map->dso->short_name) ||
		       (al->map->dso->short_name != al->map->dso->long_name &&
			strlist__has_entry(symbol_conf.dso_list,
					   al->map->dso->long_name)))))
			goto out_filtered;

		al->sym = map__find_symbol(al->map, al->addr, filter);
	}

	if (symbol_conf.sym_list && al->sym &&
	    !strlist__has_entry(symbol_conf.sym_list, al->sym->name))
		goto out_filtered;

	return 0;

out_filtered:
	al->filtered = true;
	return 0;
}