/*
 * machine.c - per-machine thread bookkeeping and perf event processing.
 */
  1. #include "debug.h"
  2. #include "event.h"
  3. #include "machine.h"
  4. #include "map.h"
  5. #include "thread.h"
  6. #include <stdbool.h>
/*
 * Look up the thread with @pid in @machine's rbtree of threads.  When
 * @create is true and no such thread exists, allocate one, link it into
 * the tree and return it.  Returns NULL when the thread is absent (and
 * @create is false) or when thread__new() fails.
 */
static struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid,
						bool create)
{
	struct rb_node **p = &machine->threads.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - PID lookups come in blocks,
	 * so most of the time we don't have to look up
	 * the full rbtree:
	 */
	if (machine->last_match && machine->last_match->pid == pid)
		return machine->last_match;

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->pid == pid) {
			/* Refresh the front-end cache on a tree hit. */
			machine->last_match = th;
			return th;
		}

		if (pid < th->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	if (!create)
		return NULL;

	th = thread__new(pid);
	if (th != NULL) {
		/* The walk above left @parent/@p at the insertion point. */
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, &machine->threads);
		machine->last_match = th;
	}

	return th;
}
/* Find the thread for @pid, creating and registering it if missing. */
struct thread *machine__findnew_thread(struct machine *machine, pid_t pid)
{
	return __machine__findnew_thread(machine, pid, true);
}
/* Find the thread for @pid; returns NULL instead of creating one. */
struct thread *machine__find_thread(struct machine *machine, pid_t pid)
{
	return __machine__findnew_thread(machine, pid, false);
}
  50. int machine__process_comm_event(struct machine *machine, union perf_event *event)
  51. {
  52. struct thread *thread = machine__findnew_thread(machine, event->comm.tid);
  53. if (dump_trace)
  54. perf_event__fprintf_comm(event, stdout);
  55. if (thread == NULL || thread__set_comm(thread, event->comm.comm)) {
  56. dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
  57. return -1;
  58. }
  59. return 0;
  60. }
  61. int machine__process_lost_event(struct machine *machine __maybe_unused,
  62. union perf_event *event)
  63. {
  64. dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
  65. event->lost.id, event->lost.lost);
  66. return 0;
  67. }
  68. static void machine__set_kernel_mmap_len(struct machine *machine,
  69. union perf_event *event)
  70. {
  71. int i;
  72. for (i = 0; i < MAP__NR_TYPES; i++) {
  73. machine->vmlinux_maps[i]->start = event->mmap.start;
  74. machine->vmlinux_maps[i]->end = (event->mmap.start +
  75. event->mmap.len);
  76. /*
  77. * Be a bit paranoid here, some perf.data file came with
  78. * a zero sized synthesized MMAP event for the kernel.
  79. */
  80. if (machine->vmlinux_maps[i]->end == 0)
  81. machine->vmlinux_maps[i]->end = ~0ULL;
  82. }
  83. }
  84. static int machine__process_kernel_mmap_event(struct machine *machine,
  85. union perf_event *event)
  86. {
  87. struct map *map;
  88. char kmmap_prefix[PATH_MAX];
  89. enum dso_kernel_type kernel_type;
  90. bool is_kernel_mmap;
  91. machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
  92. if (machine__is_host(machine))
  93. kernel_type = DSO_TYPE_KERNEL;
  94. else
  95. kernel_type = DSO_TYPE_GUEST_KERNEL;
  96. is_kernel_mmap = memcmp(event->mmap.filename,
  97. kmmap_prefix,
  98. strlen(kmmap_prefix) - 1) == 0;
  99. if (event->mmap.filename[0] == '/' ||
  100. (!is_kernel_mmap && event->mmap.filename[0] == '[')) {
  101. char short_module_name[1024];
  102. char *name, *dot;
  103. if (event->mmap.filename[0] == '/') {
  104. name = strrchr(event->mmap.filename, '/');
  105. if (name == NULL)
  106. goto out_problem;
  107. ++name; /* skip / */
  108. dot = strrchr(name, '.');
  109. if (dot == NULL)
  110. goto out_problem;
  111. snprintf(short_module_name, sizeof(short_module_name),
  112. "[%.*s]", (int)(dot - name), name);
  113. strxfrchar(short_module_name, '-', '_');
  114. } else
  115. strcpy(short_module_name, event->mmap.filename);
  116. map = machine__new_module(machine, event->mmap.start,
  117. event->mmap.filename);
  118. if (map == NULL)
  119. goto out_problem;
  120. name = strdup(short_module_name);
  121. if (name == NULL)
  122. goto out_problem;
  123. map->dso->short_name = name;
  124. map->dso->sname_alloc = 1;
  125. map->end = map->start + event->mmap.len;
  126. } else if (is_kernel_mmap) {
  127. const char *symbol_name = (event->mmap.filename +
  128. strlen(kmmap_prefix));
  129. /*
  130. * Should be there already, from the build-id table in
  131. * the header.
  132. */
  133. struct dso *kernel = __dsos__findnew(&machine->kernel_dsos,
  134. kmmap_prefix);
  135. if (kernel == NULL)
  136. goto out_problem;
  137. kernel->kernel = kernel_type;
  138. if (__machine__create_kernel_maps(machine, kernel) < 0)
  139. goto out_problem;
  140. machine__set_kernel_mmap_len(machine, event);
  141. /*
  142. * Avoid using a zero address (kptr_restrict) for the ref reloc
  143. * symbol. Effectively having zero here means that at record
  144. * time /proc/sys/kernel/kptr_restrict was non zero.
  145. */
  146. if (event->mmap.pgoff != 0) {
  147. maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
  148. symbol_name,
  149. event->mmap.pgoff);
  150. }
  151. if (machine__is_default_guest(machine)) {
  152. /*
  153. * preload dso of guest kernel and modules
  154. */
  155. dso__load(kernel, machine->vmlinux_maps[MAP__FUNCTION],
  156. NULL);
  157. }
  158. }
  159. return 0;
  160. out_problem:
  161. return -1;
  162. }
  163. int machine__process_mmap_event(struct machine *machine, union perf_event *event)
  164. {
  165. u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
  166. struct thread *thread;
  167. struct map *map;
  168. int ret = 0;
  169. if (dump_trace)
  170. perf_event__fprintf_mmap(event, stdout);
  171. if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
  172. cpumode == PERF_RECORD_MISC_KERNEL) {
  173. ret = machine__process_kernel_mmap_event(machine, event);
  174. if (ret < 0)
  175. goto out_problem;
  176. return 0;
  177. }
  178. thread = machine__findnew_thread(machine, event->mmap.pid);
  179. if (thread == NULL)
  180. goto out_problem;
  181. map = map__new(&machine->user_dsos, event->mmap.start,
  182. event->mmap.len, event->mmap.pgoff,
  183. event->mmap.pid, event->mmap.filename,
  184. MAP__FUNCTION);
  185. if (map == NULL)
  186. goto out_problem;
  187. thread__insert_map(thread, map);
  188. return 0;
  189. out_problem:
  190. dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
  191. return 0;
  192. }
  193. int machine__process_fork_event(struct machine *machine, union perf_event *event)
  194. {
  195. struct thread *thread = machine__findnew_thread(machine, event->fork.tid);
  196. struct thread *parent = machine__findnew_thread(machine, event->fork.ptid);
  197. if (dump_trace)
  198. perf_event__fprintf_task(event, stdout);
  199. if (thread == NULL || parent == NULL ||
  200. thread__fork(thread, parent) < 0) {
  201. dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
  202. return -1;
  203. }
  204. return 0;
  205. }
  206. int machine__process_exit_event(struct machine *machine, union perf_event *event)
  207. {
  208. struct thread *thread = machine__find_thread(machine, event->fork.tid);
  209. if (dump_trace)
  210. perf_event__fprintf_task(event, stdout);
  211. if (thread != NULL)
  212. machine__remove_thread(machine, thread);
  213. return 0;
  214. }
  215. int machine__process_event(struct machine *machine, union perf_event *event)
  216. {
  217. int ret;
  218. switch (event->header.type) {
  219. case PERF_RECORD_COMM:
  220. ret = machine__process_comm_event(machine, event); break;
  221. case PERF_RECORD_MMAP:
  222. ret = machine__process_mmap_event(machine, event); break;
  223. case PERF_RECORD_FORK:
  224. ret = machine__process_fork_event(machine, event); break;
  225. case PERF_RECORD_EXIT:
  226. ret = machine__process_exit_event(machine, event); break;
  227. case PERF_RECORD_LOST:
  228. ret = machine__process_lost_event(machine, event); break;
  229. default:
  230. ret = -1;
  231. break;
  232. }
  233. return ret;
  234. }