event.c

#include <linux/types.h>
#include "event.h"
#include "debug.h"
#include "session.h"
#include "sort.h"
#include "string.h"
#include "strlist.h"
#include "thread.h"

static const char *event__name[] = {
	[0]                               = "TOTAL",
	[PERF_RECORD_MMAP]                = "MMAP",
	[PERF_RECORD_LOST]                = "LOST",
	[PERF_RECORD_COMM]                = "COMM",
	[PERF_RECORD_EXIT]                = "EXIT",
	[PERF_RECORD_THROTTLE]            = "THROTTLE",
	[PERF_RECORD_UNTHROTTLE]          = "UNTHROTTLE",
	[PERF_RECORD_FORK]                = "FORK",
	[PERF_RECORD_READ]                = "READ",
	[PERF_RECORD_SAMPLE]              = "SAMPLE",
	[PERF_RECORD_HEADER_ATTR]         = "ATTR",
	[PERF_RECORD_HEADER_EVENT_TYPE]   = "EVENT_TYPE",
	[PERF_RECORD_HEADER_TRACING_DATA] = "TRACING_DATA",
	[PERF_RECORD_HEADER_BUILD_ID]     = "BUILD_ID",
	[PERF_RECORD_FINISHED_ROUND]      = "FINISHED_ROUND",
};

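/*
 * Map a PERF_RECORD_* id to its printable name; out-of-range ids are
 * reported as "INVALID" and holes in the table as "UNKNOWN".
 */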
const char *event__get_event_name(unsigned int id)
{
	if (id >= ARRAY_SIZE(event__name))
		return "INVALID";
	if (!event__name[id])
		return "UNKNOWN";
	return event__name[id];
}

static struct sample_data synth_sample = {
	.pid       = -1,
	.tid       = -1,
	.time      = -1,
	.stream_id = -1,
	.cpu       = -1,
	.period    = 1,
};

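/*
 * Synthesize a PERF_RECORD_COMM event for @pid from the Name: and Tgid:
 * lines of /proc/<pid>/status.  With @full set, one event is emitted for
 * every task listed under /proc/<pid>/task.  Returns the tgid, or 0 if we
 * raced with the task exiting.
 */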
static pid_t event__synthesize_comm(event_t *event, pid_t pid, int full,
				    event__handler_t process,
				    struct perf_session *session)
{
	char filename[PATH_MAX];
	char bf[BUFSIZ];
	FILE *fp;
	size_t size = 0;
	DIR *tasks;
	struct dirent dirent, *next;
	pid_t tgid = 0;

	snprintf(filename, sizeof(filename), "/proc/%d/status", pid);

	fp = fopen(filename, "r");
	if (fp == NULL) {
out_race:
		/*
		 * We raced with a task exiting - just return:
		 */
		pr_debug("couldn't open %s\n", filename);
		return 0;
	}

	memset(&event->comm, 0, sizeof(event->comm));

	while (!event->comm.comm[0] || !event->comm.pid) {
		if (fgets(bf, sizeof(bf), fp) == NULL) {
			pr_warning("couldn't get COMM and pgid, malformed %s\n", filename);
			goto out;
		}

		if (memcmp(bf, "Name:", 5) == 0) {
			char *name = bf + 5;
			while (*name && isspace(*name))
				++name;
			size = strlen(name) - 1;
			memcpy(event->comm.comm, name, size++);
		} else if (memcmp(bf, "Tgid:", 5) == 0) {
			char *tgids = bf + 5;
			while (*tgids && isspace(*tgids))
				++tgids;
			tgid = event->comm.pid = atoi(tgids);
		}
	}

	event->comm.header.type = PERF_RECORD_COMM;
	size = ALIGN(size, sizeof(u64));
	memset(event->comm.comm + size, 0, session->id_hdr_size);
	event->comm.header.size = (sizeof(event->comm) -
				   (sizeof(event->comm.comm) - size) +
				   session->id_hdr_size);
	if (!full) {
		event->comm.tid = pid;

		process(event, &synth_sample, session);
		goto out;
	}

	snprintf(filename, sizeof(filename), "/proc/%d/task", pid);

	tasks = opendir(filename);
	if (tasks == NULL)
		goto out_race;

	while (!readdir_r(tasks, &dirent, &next) && next) {
		char *end;
		pid = strtol(dirent.d_name, &end, 10);
		if (*end)
			continue;

		event->comm.tid = pid;

		process(event, &synth_sample, session);
	}
	closedir(tasks);

out:
	fclose(fp);

	return tgid;
}

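/*
 * Synthesize one PERF_RECORD_MMAP per executable mapping parsed from
 * /proc/<pid>/maps, so mappings created before recording started can
 * still be resolved to a DSO later.
 */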
static int event__synthesize_mmap_events(event_t *event, pid_t pid, pid_t tgid,
					 event__handler_t process,
					 struct perf_session *session)
{
	char filename[PATH_MAX];
	FILE *fp;

	snprintf(filename, sizeof(filename), "/proc/%d/maps", pid);

	fp = fopen(filename, "r");
	if (fp == NULL) {
		/*
		 * We raced with a task exiting - just return:
		 */
		pr_debug("couldn't open %s\n", filename);
		return -1;
	}

	event->header.type = PERF_RECORD_MMAP;
	/*
	 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
	 */
	event->header.misc = PERF_RECORD_MISC_USER;

	while (1) {
		char bf[BUFSIZ], *pbf = bf;
		int n;
		size_t size;
		if (fgets(bf, sizeof(bf), fp) == NULL)
			break;

		/* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
		n = hex2u64(pbf, &event->mmap.start);
		if (n < 0)
			continue;
		pbf += n + 1;
		n = hex2u64(pbf, &event->mmap.len);
		if (n < 0)
			continue;
		pbf += n + 3;
		if (*pbf == 'x') { /* vm_exec */
			char *execname = strchr(bf, '/');

			/* Catch VDSO */
			if (execname == NULL)
				execname = strstr(bf, "[vdso]");

			if (execname == NULL)
				continue;

			pbf += 3;
			n = hex2u64(pbf, &event->mmap.pgoff);

			size = strlen(execname);
			execname[size - 1] = '\0'; /* Remove \n */
			memcpy(event->mmap.filename, execname, size);
			size = ALIGN(size, sizeof(u64));
			event->mmap.len -= event->mmap.start;
			event->mmap.header.size = (sizeof(event->mmap) -
						   (sizeof(event->mmap.filename) - size));
			memset(event->mmap.filename + size, 0, session->id_hdr_size);
			event->mmap.header.size += session->id_hdr_size;
			event->mmap.pid = tgid;
			event->mmap.tid = pid;

			process(event, &synth_sample, session);
		}
	}

	fclose(fp);
	return 0;
}

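/*
 * Walk the machine's kernel map group and emit one PERF_RECORD_MMAP per
 * module map; the kernel map itself (pos->dso->kernel) is skipped.
 */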
int event__synthesize_modules(event__handler_t process,
			      struct perf_session *session,
			      struct machine *machine)
{
	struct rb_node *nd;
	struct map_groups *kmaps = &machine->kmaps;
	event_t *event = zalloc(sizeof(event->mmap) + session->id_hdr_size);

	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for kernel modules\n");
		return -1;
	}

	event->header.type = PERF_RECORD_MMAP;

	/*
	 * kernel uses 0 for user space maps, see kernel/perf_event.c
	 * __perf_event_mmap
	 */
	if (machine__is_host(machine))
		event->header.misc = PERF_RECORD_MISC_KERNEL;
	else
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;

	for (nd = rb_first(&kmaps->maps[MAP__FUNCTION]);
	     nd; nd = rb_next(nd)) {
		size_t size;
		struct map *pos = rb_entry(nd, struct map, rb_node);

		if (pos->dso->kernel)
			continue;

		size = ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
		event->mmap.header.type = PERF_RECORD_MMAP;
		event->mmap.header.size = (sizeof(event->mmap) -
					   (sizeof(event->mmap.filename) - size));
		memset(event->mmap.filename + size, 0, session->id_hdr_size);
		event->mmap.header.size += session->id_hdr_size;
		event->mmap.start = pos->start;
		event->mmap.len   = pos->end - pos->start;
		event->mmap.pid   = machine->pid;

		memcpy(event->mmap.filename, pos->dso->long_name,
		       pos->dso->long_name_len + 1);
		process(event, &synth_sample, session);
	}

	free(event);
	return 0;
}

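/* Synthesize the COMM event(s) for @pid, then its MMAP events. */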
static int __event__synthesize_thread(event_t *comm_event, event_t *mmap_event,
				      pid_t pid, event__handler_t process,
				      struct perf_session *session)
{
	pid_t tgid = event__synthesize_comm(comm_event, pid, 1, process,
					    session);
	if (tgid == -1)
		return -1;
	return event__synthesize_mmap_events(mmap_event, pid, tgid,
					     process, session);
}

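/* Synthesize COMM/MMAP events for every thread in an explicit thread_map. */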
int event__synthesize_thread_map(struct thread_map *threads,
				 event__handler_t process,
				 struct perf_session *session)
{
	event_t *comm_event, *mmap_event;
	int err = -1, thread;

	comm_event = malloc(sizeof(comm_event->comm) + session->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap) + session->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	err = 0;
	for (thread = 0; thread < threads->nr; ++thread) {
		if (__event__synthesize_thread(comm_event, mmap_event,
					       threads->map[thread],
					       process, session)) {
			err = -1;
			break;
		}
	}
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}

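/*
 * Synthesize COMM/MMAP events for every numerical /proc entry, i.e. for
 * all processes currently alive on the system.
 */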
int event__synthesize_threads(event__handler_t process,
			      struct perf_session *session)
{
	DIR *proc;
	struct dirent dirent, *next;
	event_t *comm_event, *mmap_event;
	int err = -1;

	comm_event = malloc(sizeof(comm_event->comm) + session->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap) + session->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	proc = opendir("/proc");
	if (proc == NULL)
		goto out_free_mmap;

	while (!readdir_r(proc, &dirent, &next) && next) {
		char *end;
		pid_t pid = strtol(dirent.d_name, &end, 10);

		if (*end) /* only interested in proper numerical dirents */
			continue;

		__event__synthesize_thread(comm_event, mmap_event, pid,
					   process, session);
	}
	closedir(proc);

	err = 0;
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}

struct process_symbol_args {
	const char *name;
	u64	    start;
};

static int find_symbol_cb(void *arg, const char *name, char type,
			  u64 start, u64 end __used)
{
	struct process_symbol_args *args = arg;

	/*
	 * Must be a function or at least an alias, as in PARISC64, where "_text" is
	 * an 'A' to the same address as "_stext".
	 */
	if (!(symbol_type__is_a(type, MAP__FUNCTION) ||
	      type == 'A') || strcmp(name, args->name))
		return 0;

	args->start = start;
	return 1;
}

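/*
 * Synthesize the PERF_RECORD_MMAP for the kernel text mapping itself,
 * resolving @symbol_name via kallsyms (host or guest) and storing its
 * address in the event's pgoff.
 */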
int event__synthesize_kernel_mmap(event__handler_t process,
				  struct perf_session *session,
				  struct machine *machine,
				  const char *symbol_name)
{
	size_t size;
	const char *filename, *mmap_name;
	char path[PATH_MAX];
	char name_buff[PATH_MAX];
	struct map *map;
	int err;
	/*
	 * We should get this from /sys/kernel/sections/.text, but until that is
	 * available use this, and after it is use this as a fallback for older
	 * kernels.
	 */
	struct process_symbol_args args = { .name = symbol_name, };
	event_t *event = zalloc(sizeof(event->mmap) + session->id_hdr_size);

	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for kernel modules\n");
		return -1;
	}

	mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff));
	if (machine__is_host(machine)) {
		/*
		 * kernel uses PERF_RECORD_MISC_USER for user space maps,
		 * see kernel/perf_event.c __perf_event_mmap
		 */
		event->header.misc = PERF_RECORD_MISC_KERNEL;
		filename = "/proc/kallsyms";
	} else {
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
		if (machine__is_default_guest(machine))
			filename = (char *) symbol_conf.default_guest_kallsyms;
		else {
			sprintf(path, "%s/proc/kallsyms", machine->root_dir);
			filename = path;
		}
	}

	if (kallsyms__parse(filename, &args, find_symbol_cb) <= 0) {
		free(event);
		return -ENOENT;
	}

	map = machine->vmlinux_maps[MAP__FUNCTION];
	size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
			"%s%s", mmap_name, symbol_name) + 1;
	size = ALIGN(size, sizeof(u64));
	event->mmap.header.type = PERF_RECORD_MMAP;
	event->mmap.header.size = (sizeof(event->mmap) -
			(sizeof(event->mmap.filename) - size) + session->id_hdr_size);
	event->mmap.pgoff = args.start;
	event->mmap.start = map->start;
	event->mmap.len   = map->end - event->mmap.start;
	event->mmap.pid   = machine->pid;

	err = process(event, &synth_sample, session);
	free(event);

	return err;
}

static void thread__comm_adjust(struct thread *self, struct hists *hists)
{
	char *comm = self->comm;

	if (!symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    (!symbol_conf.comm_list ||
	     strlist__has_entry(symbol_conf.comm_list, comm))) {
		u16 slen = strlen(comm);

		if (hists__new_col_len(hists, HISTC_COMM, slen))
			hists__set_col_len(hists, HISTC_THREAD, slen + 6);
	}
}

static int thread__set_comm_adjust(struct thread *self, const char *comm,
				   struct hists *hists)
{
	int ret = thread__set_comm(self, comm);

	if (ret)
		return ret;

	thread__comm_adjust(self, hists);

	return 0;
}

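/* Handle PERF_RECORD_COMM: (re)name the thread and adjust column widths. */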
int event__process_comm(event_t *self, struct sample_data *sample __used,
			struct perf_session *session)
{
	struct thread *thread = perf_session__findnew(session, self->comm.tid);

	dump_printf(": %s:%d\n", self->comm.comm, self->comm.tid);

	if (thread == NULL || thread__set_comm_adjust(thread, self->comm.comm,
						      &session->hists)) {
		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
		return -1;
	}

	return 0;
}

int event__process_lost(event_t *self, struct sample_data *sample __used,
			struct perf_session *session)
{
	dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
		    self->lost.id, self->lost.lost);
	session->hists.stats.total_lost += self->lost.lost;
	return 0;
}

static void event_set_kernel_mmap_len(struct map **maps, event_t *self)
{
	maps[MAP__FUNCTION]->start = self->mmap.start;
	maps[MAP__FUNCTION]->end   = self->mmap.start + self->mmap.len;
	/*
	 * Be a bit paranoid here, some perf.data file came with
	 * a zero sized synthesized MMAP event for the kernel.
	 */
	if (maps[MAP__FUNCTION]->end == 0)
		maps[MAP__FUNCTION]->end = ~0ULL;
}

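/*
 * PERF_RECORD_MMAP events with a kernel cpumode end up here: module maps
 * get a "[name]" short name, while the kernel mapping (re)creates the
 * machine's kernel maps and records the ref_reloc_sym.
 */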
static int event__process_kernel_mmap(event_t *self,
				      struct perf_session *session)
{
	struct map *map;
	char kmmap_prefix[PATH_MAX];
	struct machine *machine;
	enum dso_kernel_type kernel_type;
	bool is_kernel_mmap;

	machine = perf_session__findnew_machine(session, self->mmap.pid);
	if (!machine) {
		pr_err("Can't find id %d's machine\n", self->mmap.pid);
		goto out_problem;
	}

	machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
	if (machine__is_host(machine))
		kernel_type = DSO_TYPE_KERNEL;
	else
		kernel_type = DSO_TYPE_GUEST_KERNEL;

	is_kernel_mmap = memcmp(self->mmap.filename,
				kmmap_prefix,
				strlen(kmmap_prefix)) == 0;
	if (self->mmap.filename[0] == '/' ||
	    (!is_kernel_mmap && self->mmap.filename[0] == '[')) {

		char short_module_name[1024];
		char *name, *dot;

		if (self->mmap.filename[0] == '/') {
			name = strrchr(self->mmap.filename, '/');
			if (name == NULL)
				goto out_problem;

			++name; /* skip / */
			dot = strrchr(name, '.');
			if (dot == NULL)
				goto out_problem;
			snprintf(short_module_name, sizeof(short_module_name),
				 "[%.*s]", (int)(dot - name), name);
			strxfrchar(short_module_name, '-', '_');
		} else
			strcpy(short_module_name, self->mmap.filename);

		map = machine__new_module(machine, self->mmap.start,
					  self->mmap.filename);
		if (map == NULL)
			goto out_problem;

		name = strdup(short_module_name);
		if (name == NULL)
			goto out_problem;

		map->dso->short_name = name;
		map->dso->sname_alloc = 1;
		map->end = map->start + self->mmap.len;
	} else if (is_kernel_mmap) {
		const char *symbol_name = (self->mmap.filename +
					   strlen(kmmap_prefix));
		/*
		 * Should be there already, from the build-id table in
		 * the header.
		 */
		struct dso *kernel = __dsos__findnew(&machine->kernel_dsos,
						     kmmap_prefix);
		if (kernel == NULL)
			goto out_problem;

		kernel->kernel = kernel_type;
		if (__machine__create_kernel_maps(machine, kernel) < 0)
			goto out_problem;

		event_set_kernel_mmap_len(machine->vmlinux_maps, self);
		perf_session__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
							 symbol_name,
							 self->mmap.pgoff);
		if (machine__is_default_guest(machine)) {
			/*
			 * preload dso of guest kernel and modules
			 */
			dso__load(kernel, machine->vmlinux_maps[MAP__FUNCTION],
				  NULL);
		}
	}
	return 0;
out_problem:
	return -1;
}

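/*
 * Handle PERF_RECORD_MMAP: kernel/guest-kernel mmaps are routed to
 * event__process_kernel_mmap(); user mmaps create a new map in the
 * owning thread's map group.
 */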
int event__process_mmap(event_t *self, struct sample_data *sample __used,
			struct perf_session *session)
{
	struct machine *machine;
	struct thread *thread;
	struct map *map;
	u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	int ret = 0;

	dump_printf(" %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %s\n",
		    self->mmap.pid, self->mmap.tid, self->mmap.start,
		    self->mmap.len, self->mmap.pgoff, self->mmap.filename);

	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = event__process_kernel_mmap(self, session);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	machine = perf_session__find_host_machine(session);
	if (machine == NULL)
		goto out_problem;
	thread = perf_session__findnew(session, self->mmap.pid);
	if (thread == NULL)
		goto out_problem;
	map = map__new(&machine->user_dsos, self->mmap.start,
		       self->mmap.len, self->mmap.pgoff,
		       self->mmap.pid, self->mmap.filename,
		       MAP__FUNCTION);
	if (map == NULL)
		goto out_problem;

	thread__insert_map(thread, map);
	return 0;

out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
	return 0;
}

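/*
 * Handle PERF_RECORD_FORK and PERF_RECORD_EXIT: EXIT removes the thread
 * from the session, FORK attaches the new thread to its parent.
 */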
int event__process_task(event_t *self, struct sample_data *sample __used,
			struct perf_session *session)
{
	struct thread *thread = perf_session__findnew(session, self->fork.tid);
	struct thread *parent = perf_session__findnew(session, self->fork.ptid);

	dump_printf("(%d:%d):(%d:%d)\n", self->fork.pid, self->fork.tid,
		    self->fork.ppid, self->fork.ptid);

	if (self->header.type == PERF_RECORD_EXIT) {
		perf_session__remove_thread(session, thread);
		return 0;
	}

	if (thread == NULL || parent == NULL ||
	    thread__fork(thread, parent) < 0) {
		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
		return -1;
	}

	return 0;
}

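/* Dispatch a single event to the matching event__process_* handler by type. */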
int event__process(event_t *event, struct sample_data *sample,
		   struct perf_session *session)
{
	switch (event->header.type) {
	case PERF_RECORD_COMM:
		event__process_comm(event, sample, session);
		break;
	case PERF_RECORD_MMAP:
		event__process_mmap(event, sample, session);
		break;
	case PERF_RECORD_FORK:
	case PERF_RECORD_EXIT:
		event__process_task(event, sample, session);
		break;
	default:
		break;
	}

	return 0;
}

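/*
 * Pick the map group matching the sample's cpumode (host/guest,
 * kernel/user) and look @addr up in it, converting the address to a
 * map-relative one on success.
 */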
void thread__find_addr_map(struct thread *self,
			   struct perf_session *session, u8 cpumode,
			   enum map_type type, pid_t pid, u64 addr,
			   struct addr_location *al)
{
	struct map_groups *mg = &self->mg;
	struct machine *machine = NULL;

	al->thread = self;
	al->addr = addr;
	al->cpumode = cpumode;
	al->filtered = false;

	if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
		al->level = 'k';
		machine = perf_session__find_host_machine(session);
		if (machine == NULL) {
			al->map = NULL;
			return;
		}
		mg = &machine->kmaps;
	} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
		al->level = '.';
		machine = perf_session__find_host_machine(session);
	} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
		al->level = 'g';
		machine = perf_session__find_machine(session, pid);
		if (machine == NULL) {
			al->map = NULL;
			return;
		}
		mg = &machine->kmaps;
	} else {
		/*
		 * 'u' means guest os user space.
		 * TODO: We don't support guest user space. Might support it later.
		 */
		if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest)
			al->level = 'u';
		else
			al->level = 'H';
		al->map = NULL;

		if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
		     cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
		    !perf_guest)
			al->filtered = true;
		if ((cpumode == PERF_RECORD_MISC_USER ||
		     cpumode == PERF_RECORD_MISC_KERNEL) &&
		    !perf_host)
			al->filtered = true;

		return;
	}
try_again:
	al->map = map_groups__find(mg, type, al->addr);
	if (al->map == NULL) {
		/*
		 * If this is outside of all known maps, and is a negative
		 * address, try to look it up in the kernel dso, as it might be
		 * a vsyscall or vdso (which executes in user-mode).
		 *
		 * XXX This is nasty, we should have a symbol list in the
		 * "[vdso]" dso, but for now lets use the old trick of looking
		 * in the whole kernel symbol list.
		 */
		if ((long long)al->addr < 0 &&
		    cpumode == PERF_RECORD_MISC_KERNEL &&
		    machine && mg != &machine->kmaps) {
			mg = &machine->kmaps;
			goto try_again;
		}
	} else
		al->addr = al->map->map_ip(al->map, al->addr);
}

void thread__find_addr_location(struct thread *self,
				struct perf_session *session, u8 cpumode,
				enum map_type type, pid_t pid, u64 addr,
				struct addr_location *al,
				symbol_filter_t filter)
{
	thread__find_addr_map(self, session, cpumode, type, pid, addr, al);
	if (al->map != NULL)
		al->sym = map__find_symbol(al->map, al->addr, filter);
	else
		al->sym = NULL;
}

static void dso__calc_col_width(struct dso *self, struct hists *hists)
{
	if (!symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    (!symbol_conf.dso_list ||
	     strlist__has_entry(symbol_conf.dso_list, self->name))) {
		u16 slen = dso__name_len(self);
		hists__new_col_len(hists, HISTC_DSO, slen);
	}

	self->slen_calculated = 1;
}

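/*
 * Resolve a sample to thread/map/symbol, applying the comm/dso/sym filter
 * lists; filtered samples are flagged via al->filtered rather than
 * rejected.
 */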
int event__preprocess_sample(const event_t *self, struct perf_session *session,
			     struct addr_location *al, struct sample_data *data,
			     symbol_filter_t filter)
{
	u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread = perf_session__findnew(session, self->ip.pid);

	if (thread == NULL)
		return -1;

	if (symbol_conf.comm_list &&
	    !strlist__has_entry(symbol_conf.comm_list, thread->comm))
		goto out_filtered;

	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
	/*
	 * Have we already created the kernel maps for the host machine?
	 *
	 * This should have happened earlier, when we processed the kernel MMAP
	 * events, but for older perf.data files there was no such thing, so do
	 * it now.
	 */
	if (cpumode == PERF_RECORD_MISC_KERNEL &&
	    session->host_machine.vmlinux_maps[MAP__FUNCTION] == NULL)
		machine__create_kernel_maps(&session->host_machine);

	thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION,
			      self->ip.pid, self->ip.ip, al);
	dump_printf(" ...... dso: %s\n",
		    al->map ? al->map->dso->long_name :
			al->level == 'H' ? "[hypervisor]" : "<not found>");
	al->sym = NULL;
	al->cpu = data->cpu;

	if (al->map) {
		if (symbol_conf.dso_list &&
		    (!al->map || !al->map->dso ||
		     !(strlist__has_entry(symbol_conf.dso_list,
					  al->map->dso->short_name) ||
		       (al->map->dso->short_name != al->map->dso->long_name &&
			strlist__has_entry(symbol_conf.dso_list,
					   al->map->dso->long_name)))))
			goto out_filtered;
		/*
		 * We have to do this here as we may have a dso with no symbol
		 * hit that has a name longer than the ones with symbols
		 * sampled.
		 */
		if (!sort_dso.elide && !al->map->dso->slen_calculated)
			dso__calc_col_width(al->map->dso, &session->hists);

		al->sym = map__find_symbol(al->map, al->addr, filter);
	} else {
		const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

		if (hists__col_len(&session->hists, HISTC_DSO) < unresolved_col_width &&
		    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
		    !symbol_conf.dso_list)
			hists__set_col_len(&session->hists, HISTC_DSO,
					   unresolved_col_width);
	}

	if (symbol_conf.sym_list && al->sym &&
	    !strlist__has_entry(symbol_conf.sym_list, al->sym->name))
		goto out_filtered;

	return 0;

out_filtered:
	al->filtered = true;
	return 0;
}

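/*
 * For non-sample events carrying sample_id_all trailers, walk the id
 * fields backwards from the end of the event to recover pid/tid, time,
 * id, stream_id and cpu.
 */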
static int event__parse_id_sample(const event_t *event,
				  struct perf_session *session,
				  struct sample_data *sample)
{
	const u64 *array;
	u64 type;

	sample->cpu = sample->pid = sample->tid = -1;
	sample->stream_id = sample->id = sample->time = -1ULL;

	if (!session->sample_id_all)
		return 0;

	array = event->sample.array;
	array += ((event->header.size -
		   sizeof(event->header)) / sizeof(u64)) - 1;

	type = session->sample_type;
	if (type & PERF_SAMPLE_CPU) {
		u32 *p = (u32 *)array;
		sample->cpu = *p;
		array--;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		sample->stream_id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_ID) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TIME) {
		sample->time = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TID) {
		u32 *p = (u32 *)array;
		sample->pid = p[0];
		sample->tid = p[1];
	}

	return 0;
}

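/*
 * Decode a PERF_RECORD_SAMPLE according to the session's sample_type,
 * consuming the u64 array in the order the kernel wrote it.
 */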
int event__parse_sample(const event_t *event, struct perf_session *session,
			struct sample_data *data)
{
	const u64 *array;
	u64 type;

	if (event->header.type != PERF_RECORD_SAMPLE)
		return event__parse_id_sample(event, session, data);

	array = event->sample.array;
	type = session->sample_type;

	if (type & PERF_SAMPLE_IP) {
		data->ip = event->ip.ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u32 *p = (u32 *)array;
		data->pid = p[0];
		data->tid = p[1];
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		data->time = *array;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		data->addr = *array;
		array++;
	}

	data->id = -1ULL;
	if (type & PERF_SAMPLE_ID) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u32 *p = (u32 *)array;
		data->cpu = *p;
		array++;
	} else
		data->cpu = -1;

	if (type & PERF_SAMPLE_PERIOD) {
		data->period = *array;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		pr_debug("PERF_SAMPLE_READ is unsupported for now\n");
		return -1;
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		data->callchain = (struct ip_callchain *)array;
		array += 1 + data->callchain->nr;
	}

	if (type & PERF_SAMPLE_RAW) {
		u32 *p = (u32 *)array;
		data->raw_size = *p;
		p++;
		data->raw_data = p;
	}

	return 0;
}