thread.c

#include "../perf.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include "session.h"
#include "thread.h"
#include "util.h"
#include "debug.h"

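/*
 * A map_groups keeps, per map type, an rbtree of live maps and a list
 * of removed maps; start them all out empty.
 */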
void map_groups__init(struct map_groups *self)
{
	int i;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		self->maps[i] = RB_ROOT;
		INIT_LIST_HEAD(&self->removed_maps[i]);
	}
}

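/*
 * Allocate a thread and give it a synthetic ":<pid>" comm until the
 * real command name is known (see thread__set_comm below).
 */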
static struct thread *thread__new(pid_t pid)
{
	struct thread *self = zalloc(sizeof(*self));

	if (self != NULL) {
		map_groups__init(&self->mg);
		self->pid = pid;
		self->comm = malloc(32);
		if (self->comm)
			snprintf(self->comm, 32, ":%d", self->pid);
	}

	return self;
}

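/*
 * Empty all the live-map rbtrees. The maps are parked on the removed
 * lists rather than freed, since hist_entry instances may still hold
 * references to them.
 */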
static void map_groups__flush(struct map_groups *self)
{
	int type;

	for (type = 0; type < MAP__NR_TYPES; type++) {
		struct rb_root *root = &self->maps[type];
		struct rb_node *next = rb_first(root);

		while (next) {
			struct map *pos = rb_entry(next, struct map, rb_node);

			next = rb_next(&pos->rb_node);
			rb_erase(&pos->rb_node, root);
			/*
			 * We may have references to this map, for
			 * instance in some hist_entry instances, so
			 * just move them to a separate list.
			 */
			list_add_tail(&pos->node, &self->removed_maps[pos->type]);
		}
	}
}

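/*
 * A new comm usually means an exec(), which replaces the address
 * space, so the now-stale maps are flushed along with setting it.
 */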
int thread__set_comm(struct thread *self, const char *comm)
{
	int err;

	if (self->comm)
		free(self->comm);
	self->comm = strdup(comm);
	err = self->comm == NULL ? -ENOMEM : 0;
	if (!err) {
		self->comm_set = true;
		map_groups__flush(&self->mg);
	}
	return err;
}

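/* strlen(comm), computed lazily and cached; 0 when there is no comm. */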
int thread__comm_len(struct thread *self)
{
	if (!self->comm_len) {
		if (!self->comm)
			return 0;
		self->comm_len = strlen(self->comm);
	}

	return self->comm_len;
}

size_t __map_groups__fprintf_maps(struct map_groups *self,
				  enum map_type type, FILE *fp)
{
	size_t printed = fprintf(fp, "%s:\n", map_type__name[type]);
	struct rb_node *nd;

	for (nd = rb_first(&self->maps[type]); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node);

		printed += fprintf(fp, "Map:");
		printed += map__fprintf(pos, fp);
		if (verbose > 2) {
			printed += dso__fprintf(pos->dso, type, fp);
			printed += fprintf(fp, "--\n");
		}
	}

	return printed;
}

size_t map_groups__fprintf_maps(struct map_groups *self, FILE *fp)
{
	size_t printed = 0, i;

	for (i = 0; i < MAP__NR_TYPES; ++i)
		printed += __map_groups__fprintf_maps(self, i, fp);

	return printed;
}

static size_t __map_groups__fprintf_removed_maps(struct map_groups *self,
						 enum map_type type, FILE *fp)
{
	struct map *pos;
	size_t printed = 0;

	list_for_each_entry(pos, &self->removed_maps[type], node) {
		printed += fprintf(fp, "Map:");
		printed += map__fprintf(pos, fp);
		if (verbose > 1) {
			printed += dso__fprintf(pos->dso, type, fp);
			printed += fprintf(fp, "--\n");
		}
	}
	return printed;
}

static size_t map_groups__fprintf_removed_maps(struct map_groups *self, FILE *fp)
{
	size_t printed = 0, i;

	for (i = 0; i < MAP__NR_TYPES; ++i)
		printed += __map_groups__fprintf_removed_maps(self, i, fp);

	return printed;
}

static size_t map_groups__fprintf(struct map_groups *self, FILE *fp)
{
	size_t printed = map_groups__fprintf_maps(self, fp);

	printed += fprintf(fp, "Removed maps:\n");
	return printed + map_groups__fprintf_removed_maps(self, fp);
}

static size_t thread__fprintf(struct thread *self, FILE *fp)
{
	return fprintf(fp, "Thread %d %s\n", self->pid, self->comm) +
	       map_groups__fprintf(&self->mg, fp);
}

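/*
 * Find the thread with this pid in the session's rbtree, creating and
 * inserting it if it is not there yet.
 */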
struct thread *perf_session__findnew(struct perf_session *self, pid_t pid)
{
	struct rb_node **p = &self->threads.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - PID lookups come in blocks,
	 * so most of the time we don't have to look up
	 * the full rbtree:
	 */
	if (self->last_match && self->last_match->pid == pid)
		return self->last_match;

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->pid == pid) {
			self->last_match = th;
			return th;
		}

		if (pid < th->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	th = thread__new(pid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, &self->threads);
		self->last_match = th;
	}

	return th;
}

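/*
 * Remove every existing map that overlaps the incoming one. When only
 * part of an old map is covered, clone it so that the non-overlapped
 * head and/or tail regions stay alive.
 */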
static int map_groups__fixup_overlappings(struct map_groups *self,
					  struct map *map)
{
	struct rb_root *root = &self->maps[map->type];
	struct rb_node *next = rb_first(root);

	while (next) {
		struct map *pos = rb_entry(next, struct map, rb_node);

		next = rb_next(&pos->rb_node);
		if (!map__overlap(pos, map))
			continue;

		if (verbose >= 2) {
			fputs("overlapping maps:\n", stderr);
			map__fprintf(map, stderr);
			map__fprintf(pos, stderr);
		}

		rb_erase(&pos->rb_node, root);
		/*
		 * We may have references to this map, for instance in some
		 * hist_entry instances, so just move them to a separate
		 * list.
		 */
		list_add_tail(&pos->node, &self->removed_maps[map->type]);
		/*
		 * Now check if we need to create new maps for areas not
		 * overlapped by the new map:
		 */
		if (map->start > pos->start) {
			struct map *before = map__clone(pos);

			if (before == NULL)
				return -ENOMEM;

			before->end = map->start - 1;
			map_groups__insert(self, before);
			if (verbose >= 2)
				map__fprintf(before, stderr);
		}

		if (map->end < pos->end) {
			struct map *after = map__clone(pos);

			if (after == NULL)
				return -ENOMEM;

			after->start = map->end + 1;
			map_groups__insert(self, after);
			if (verbose >= 2)
				map__fprintf(after, stderr);
		}
	}

	return 0;
}

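/* Insert a map into an rbtree keyed and ordered by map->start. */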
void maps__insert(struct rb_root *maps, struct map *map)
{
	struct rb_node **p = &maps->rb_node;
	struct rb_node *parent = NULL;
	const u64 ip = map->start;
	struct map *m;

	while (*p != NULL) {
		parent = *p;
		m = rb_entry(parent, struct map, rb_node);
		if (ip < m->start)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&map->rb_node, parent, p);
	rb_insert_color(&map->rb_node, maps);
}

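/* Find the map whose inclusive [start, end] range contains ip, or NULL. */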
struct map *maps__find(struct rb_root *maps, u64 ip)
{
	struct rb_node **p = &maps->rb_node;
	struct rb_node *parent = NULL;
	struct map *m;

	while (*p != NULL) {
		parent = *p;
		m = rb_entry(parent, struct map, rb_node);
		if (ip < m->start)
			p = &(*p)->rb_left;
		else if (ip > m->end)
			p = &(*p)->rb_right;
		else
			return m;
	}

	return NULL;
}

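/*
 * Add a map to the thread, evicting whatever it overlaps first. Note
 * that a -ENOMEM from the fixup step is silently ignored here.
 */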
void thread__insert_map(struct thread *self, struct map *map)
{
	map_groups__fixup_overlappings(&self->mg, map);
	map_groups__insert(&self->mg, map);
}

/*
 * XXX This should not really _copy_ the maps, but refcount them.
 */
static int map_groups__clone(struct map_groups *self,
			     struct map_groups *parent, enum map_type type)
{
	struct rb_node *nd;

	for (nd = rb_first(&parent->maps[type]); nd; nd = rb_next(nd)) {
		struct map *map = rb_entry(nd, struct map, rb_node);
		struct map *new = map__clone(map);

		if (new == NULL)
			return -ENOMEM;

		map_groups__insert(self, new);
	}
	return 0;
}

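/*
 * Inherit the parent's comm (when it has been set) and a copy of all
 * of its maps, mirroring what fork() does to the child address space.
 */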
int thread__fork(struct thread *self, struct thread *parent)
{
	int i;

	if (parent->comm_set) {
		if (self->comm)
			free(self->comm);
		self->comm = strdup(parent->comm);
		if (!self->comm)
			return -ENOMEM;
		self->comm_set = true;
	}

	for (i = 0; i < MAP__NR_TYPES; ++i)
		if (map_groups__clone(&self->mg, &parent->mg, i) < 0)
			return -ENOMEM;

	return 0;
}

size_t perf_session__fprintf(struct perf_session *self, FILE *fp)
{
	size_t ret = 0;
	struct rb_node *nd;

	for (nd = rb_first(&self->threads); nd; nd = rb_next(nd)) {
		struct thread *pos = rb_entry(nd, struct thread, rb_node);

		ret += thread__fprintf(pos, fp);
	}

	return ret;
}

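/*
 * Resolve addr to a symbol: find the containing map, translate the
 * address into the DSO's address space with map_ip(), then search the
 * DSO's symbol table.
 */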
struct symbol *map_groups__find_symbol(struct map_groups *self,
				       enum map_type type, u64 addr,
				       symbol_filter_t filter)
{
	struct map *map = map_groups__find(self, type, addr);

	if (map != NULL)
		return map__find_symbol(map, map->map_ip(map, addr), filter);

	return NULL;
}