/* machine.c */
  1. #include "debug.h"
  2. #include "event.h"
  3. #include "machine.h"
  4. #include "map.h"
  5. #include "thread.h"
  6. #include <stdbool.h>
  7. static struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid,
  8. bool create)
  9. {
  10. struct rb_node **p = &machine->threads.rb_node;
  11. struct rb_node *parent = NULL;
  12. struct thread *th;
  13. /*
  14. * Font-end cache - PID lookups come in blocks,
  15. * so most of the time we dont have to look up
  16. * the full rbtree:
  17. */
  18. if (machine->last_match && machine->last_match->pid == pid)
  19. return machine->last_match;
  20. while (*p != NULL) {
  21. parent = *p;
  22. th = rb_entry(parent, struct thread, rb_node);
  23. if (th->pid == pid) {
  24. machine->last_match = th;
  25. return th;
  26. }
  27. if (pid < th->pid)
  28. p = &(*p)->rb_left;
  29. else
  30. p = &(*p)->rb_right;
  31. }
  32. if (!create)
  33. return NULL;
  34. th = thread__new(pid);
  35. if (th != NULL) {
  36. rb_link_node(&th->rb_node, parent, p);
  37. rb_insert_color(&th->rb_node, &machine->threads);
  38. machine->last_match = th;
  39. }
  40. return th;
  41. }
  42. struct thread *machine__findnew_thread(struct machine *machine, pid_t pid)
  43. {
  44. return __machine__findnew_thread(machine, pid, true);
  45. }
  46. struct thread *machine__find_thread(struct machine *machine, pid_t pid)
  47. {
  48. return __machine__findnew_thread(machine, pid, false);
  49. }
  50. int machine__process_comm_event(struct machine *machine, union perf_event *event)
  51. {
  52. struct thread *thread = machine__findnew_thread(machine, event->comm.tid);
  53. if (dump_trace)
  54. perf_event__fprintf_comm(event, stdout);
  55. if (thread == NULL || thread__set_comm(thread, event->comm.comm)) {
  56. dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
  57. return -1;
  58. }
  59. return 0;
  60. }
  61. int machine__process_lost_event(struct machine *machine __maybe_unused,
  62. union perf_event *event)
  63. {
  64. dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
  65. event->lost.id, event->lost.lost);
  66. return 0;
  67. }
  68. static void machine__set_kernel_mmap_len(struct machine *machine,
  69. union perf_event *event)
  70. {
  71. machine->vmlinux_maps[MAP__FUNCTION]->start = event->mmap.start;
  72. machine->vmlinux_maps[MAP__FUNCTION]->end = (event->mmap.start +
  73. event->mmap.len);
  74. /*
  75. * Be a bit paranoid here, some perf.data file came with
  76. * a zero sized synthesized MMAP event for the kernel.
  77. */
  78. if (machine->vmlinux_maps[MAP__FUNCTION]->end == 0)
  79. machine->vmlinux_maps[MAP__FUNCTION]->end = ~0ULL;
  80. }
  81. static int machine__process_kernel_mmap_event(struct machine *machine,
  82. union perf_event *event)
  83. {
  84. struct map *map;
  85. char kmmap_prefix[PATH_MAX];
  86. enum dso_kernel_type kernel_type;
  87. bool is_kernel_mmap;
  88. machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
  89. if (machine__is_host(machine))
  90. kernel_type = DSO_TYPE_KERNEL;
  91. else
  92. kernel_type = DSO_TYPE_GUEST_KERNEL;
  93. is_kernel_mmap = memcmp(event->mmap.filename,
  94. kmmap_prefix,
  95. strlen(kmmap_prefix) - 1) == 0;
  96. if (event->mmap.filename[0] == '/' ||
  97. (!is_kernel_mmap && event->mmap.filename[0] == '[')) {
  98. char short_module_name[1024];
  99. char *name, *dot;
  100. if (event->mmap.filename[0] == '/') {
  101. name = strrchr(event->mmap.filename, '/');
  102. if (name == NULL)
  103. goto out_problem;
  104. ++name; /* skip / */
  105. dot = strrchr(name, '.');
  106. if (dot == NULL)
  107. goto out_problem;
  108. snprintf(short_module_name, sizeof(short_module_name),
  109. "[%.*s]", (int)(dot - name), name);
  110. strxfrchar(short_module_name, '-', '_');
  111. } else
  112. strcpy(short_module_name, event->mmap.filename);
  113. map = machine__new_module(machine, event->mmap.start,
  114. event->mmap.filename);
  115. if (map == NULL)
  116. goto out_problem;
  117. name = strdup(short_module_name);
  118. if (name == NULL)
  119. goto out_problem;
  120. map->dso->short_name = name;
  121. map->dso->sname_alloc = 1;
  122. map->end = map->start + event->mmap.len;
  123. } else if (is_kernel_mmap) {
  124. const char *symbol_name = (event->mmap.filename +
  125. strlen(kmmap_prefix));
  126. /*
  127. * Should be there already, from the build-id table in
  128. * the header.
  129. */
  130. struct dso *kernel = __dsos__findnew(&machine->kernel_dsos,
  131. kmmap_prefix);
  132. if (kernel == NULL)
  133. goto out_problem;
  134. kernel->kernel = kernel_type;
  135. if (__machine__create_kernel_maps(machine, kernel) < 0)
  136. goto out_problem;
  137. machine__set_kernel_mmap_len(machine, event);
  138. /*
  139. * Avoid using a zero address (kptr_restrict) for the ref reloc
  140. * symbol. Effectively having zero here means that at record
  141. * time /proc/sys/kernel/kptr_restrict was non zero.
  142. */
  143. if (event->mmap.pgoff != 0) {
  144. maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
  145. symbol_name,
  146. event->mmap.pgoff);
  147. }
  148. if (machine__is_default_guest(machine)) {
  149. /*
  150. * preload dso of guest kernel and modules
  151. */
  152. dso__load(kernel, machine->vmlinux_maps[MAP__FUNCTION],
  153. NULL);
  154. }
  155. }
  156. return 0;
  157. out_problem:
  158. return -1;
  159. }
  160. int machine__process_mmap_event(struct machine *machine, union perf_event *event)
  161. {
  162. u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
  163. struct thread *thread;
  164. struct map *map;
  165. int ret = 0;
  166. if (dump_trace)
  167. perf_event__fprintf_mmap(event, stdout);
  168. if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
  169. cpumode == PERF_RECORD_MISC_KERNEL) {
  170. ret = machine__process_kernel_mmap_event(machine, event);
  171. if (ret < 0)
  172. goto out_problem;
  173. return 0;
  174. }
  175. thread = machine__findnew_thread(machine, event->mmap.pid);
  176. if (thread == NULL)
  177. goto out_problem;
  178. map = map__new(&machine->user_dsos, event->mmap.start,
  179. event->mmap.len, event->mmap.pgoff,
  180. event->mmap.pid, event->mmap.filename,
  181. MAP__FUNCTION);
  182. if (map == NULL)
  183. goto out_problem;
  184. thread__insert_map(thread, map);
  185. return 0;
  186. out_problem:
  187. dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
  188. return 0;
  189. }
  190. int machine__process_fork_event(struct machine *machine, union perf_event *event)
  191. {
  192. struct thread *thread = machine__findnew_thread(machine, event->fork.tid);
  193. struct thread *parent = machine__findnew_thread(machine, event->fork.ptid);
  194. if (dump_trace)
  195. perf_event__fprintf_task(event, stdout);
  196. if (thread == NULL || parent == NULL ||
  197. thread__fork(thread, parent) < 0) {
  198. dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
  199. return -1;
  200. }
  201. return 0;
  202. }
  203. int machine__process_exit_event(struct machine *machine, union perf_event *event)
  204. {
  205. struct thread *thread = machine__find_thread(machine, event->fork.tid);
  206. if (dump_trace)
  207. perf_event__fprintf_task(event, stdout);
  208. if (thread != NULL)
  209. machine__remove_thread(machine, thread);
  210. return 0;
  211. }
  212. int machine__process_event(struct machine *machine, union perf_event *event)
  213. {
  214. int ret;
  215. switch (event->header.type) {
  216. case PERF_RECORD_COMM:
  217. ret = machine__process_comm_event(machine, event); break;
  218. case PERF_RECORD_MMAP:
  219. ret = machine__process_mmap_event(machine, event); break;
  220. case PERF_RECORD_FORK:
  221. ret = machine__process_fork_event(machine, event); break;
  222. case PERF_RECORD_EXIT:
  223. ret = machine__process_exit_event(machine, event); break;
  224. case PERF_RECORD_LOST:
  225. ret = machine__process_lost_event(machine, event); break;
  226. default:
  227. ret = -1;
  228. break;
  229. }
  230. return ret;
  231. }