/* thread.c - perf thread and map-groups management */
  1. #include "../perf.h"
  2. #include <stdlib.h>
  3. #include <stdio.h>
  4. #include <string.h>
  5. #include "session.h"
  6. #include "thread.h"
  7. #include "util.h"
  8. #include "debug.h"
  9. void map_groups__init(struct map_groups *self)
  10. {
  11. int i;
  12. for (i = 0; i < MAP__NR_TYPES; ++i) {
  13. self->maps[i] = RB_ROOT;
  14. INIT_LIST_HEAD(&self->removed_maps[i]);
  15. }
  16. }
  17. static struct thread *thread__new(pid_t pid)
  18. {
  19. struct thread *self = zalloc(sizeof(*self));
  20. if (self != NULL) {
  21. map_groups__init(&self->mg);
  22. self->pid = pid;
  23. self->comm = malloc(32);
  24. if (self->comm)
  25. snprintf(self->comm, 32, ":%d", self->pid);
  26. }
  27. return self;
  28. }
  29. static void map_groups__flush(struct map_groups *self)
  30. {
  31. int type;
  32. for (type = 0; type < MAP__NR_TYPES; type++) {
  33. struct rb_root *root = &self->maps[type];
  34. struct rb_node *next = rb_first(root);
  35. while (next) {
  36. struct map *pos = rb_entry(next, struct map, rb_node);
  37. next = rb_next(&pos->rb_node);
  38. rb_erase(&pos->rb_node, root);
  39. /*
  40. * We may have references to this map, for
  41. * instance in some hist_entry instances, so
  42. * just move them to a separate list.
  43. */
  44. list_add_tail(&pos->node, &self->removed_maps[pos->type]);
  45. }
  46. }
  47. }
  48. int thread__set_comm(struct thread *self, const char *comm)
  49. {
  50. int err;
  51. if (self->comm)
  52. free(self->comm);
  53. self->comm = strdup(comm);
  54. err = self->comm == NULL ? -ENOMEM : 0;
  55. if (!err) {
  56. self->comm_set = true;
  57. map_groups__flush(&self->mg);
  58. }
  59. return err;
  60. }
  61. int thread__comm_len(struct thread *self)
  62. {
  63. if (!self->comm_len) {
  64. if (!self->comm)
  65. return 0;
  66. self->comm_len = strlen(self->comm);
  67. }
  68. return self->comm_len;
  69. }
  70. static size_t __map_groups__fprintf_maps(struct map_groups *self,
  71. enum map_type type, FILE *fp)
  72. {
  73. size_t printed = fprintf(fp, "%s:\n", map_type__name[type]);
  74. struct rb_node *nd;
  75. for (nd = rb_first(&self->maps[type]); nd; nd = rb_next(nd)) {
  76. struct map *pos = rb_entry(nd, struct map, rb_node);
  77. printed += fprintf(fp, "Map:");
  78. printed += map__fprintf(pos, fp);
  79. if (verbose > 1) {
  80. printed += dso__fprintf(pos->dso, type, fp);
  81. printed += fprintf(fp, "--\n");
  82. }
  83. }
  84. return printed;
  85. }
  86. size_t map_groups__fprintf_maps(struct map_groups *self, FILE *fp)
  87. {
  88. size_t printed = 0, i;
  89. for (i = 0; i < MAP__NR_TYPES; ++i)
  90. printed += __map_groups__fprintf_maps(self, i, fp);
  91. return printed;
  92. }
  93. static size_t __map_groups__fprintf_removed_maps(struct map_groups *self,
  94. enum map_type type, FILE *fp)
  95. {
  96. struct map *pos;
  97. size_t printed = 0;
  98. list_for_each_entry(pos, &self->removed_maps[type], node) {
  99. printed += fprintf(fp, "Map:");
  100. printed += map__fprintf(pos, fp);
  101. if (verbose > 1) {
  102. printed += dso__fprintf(pos->dso, type, fp);
  103. printed += fprintf(fp, "--\n");
  104. }
  105. }
  106. return printed;
  107. }
  108. static size_t map_groups__fprintf_removed_maps(struct map_groups *self, FILE *fp)
  109. {
  110. size_t printed = 0, i;
  111. for (i = 0; i < MAP__NR_TYPES; ++i)
  112. printed += __map_groups__fprintf_removed_maps(self, i, fp);
  113. return printed;
  114. }
  115. static size_t map_groups__fprintf(struct map_groups *self, FILE *fp)
  116. {
  117. size_t printed = map_groups__fprintf_maps(self, fp);
  118. printed += fprintf(fp, "Removed maps:\n");
  119. return printed + map_groups__fprintf_removed_maps(self, fp);
  120. }
  121. static size_t thread__fprintf(struct thread *self, FILE *fp)
  122. {
  123. return fprintf(fp, "Thread %d %s\n", self->pid, self->comm) +
  124. map_groups__fprintf(&self->mg, fp);
  125. }
/*
 * Find the thread for @pid in the session's rbtree, creating and
 * inserting a new one if it is not there yet.  The result is cached in
 * self->last_match.  Returns NULL only if thread__new() fails.
 */
struct thread *perf_session__findnew(struct perf_session *self, pid_t pid)
{
	struct rb_node **p = &self->threads.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - PID lookups come in blocks,
	 * so most of the time we dont have to look up
	 * the full rbtree:
	 */
	if (self->last_match && self->last_match->pid == pid)
		return self->last_match;

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->pid == pid) {
			self->last_match = th;
			return th;
		}

		if (pid < th->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	/* Not found: allocate, link at the leaf slot found above, cache. */
	th = thread__new(pid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, &self->threads);
		self->last_match = th;
	}

	return th;
}
  158. static void map_groups__remove_overlappings(struct map_groups *self,
  159. struct map *map)
  160. {
  161. struct rb_root *root = &self->maps[map->type];
  162. struct rb_node *next = rb_first(root);
  163. while (next) {
  164. struct map *pos = rb_entry(next, struct map, rb_node);
  165. next = rb_next(&pos->rb_node);
  166. if (!map__overlap(pos, map))
  167. continue;
  168. if (verbose >= 2) {
  169. fputs("overlapping maps:\n", stderr);
  170. map__fprintf(map, stderr);
  171. map__fprintf(pos, stderr);
  172. }
  173. rb_erase(&pos->rb_node, root);
  174. /*
  175. * We may have references to this map, for instance in some
  176. * hist_entry instances, so just move them to a separate
  177. * list.
  178. */
  179. list_add_tail(&pos->node, &self->removed_maps[map->type]);
  180. }
  181. }
  182. void maps__insert(struct rb_root *maps, struct map *map)
  183. {
  184. struct rb_node **p = &maps->rb_node;
  185. struct rb_node *parent = NULL;
  186. const u64 ip = map->start;
  187. struct map *m;
  188. while (*p != NULL) {
  189. parent = *p;
  190. m = rb_entry(parent, struct map, rb_node);
  191. if (ip < m->start)
  192. p = &(*p)->rb_left;
  193. else
  194. p = &(*p)->rb_right;
  195. }
  196. rb_link_node(&map->rb_node, parent, p);
  197. rb_insert_color(&map->rb_node, maps);
  198. }
  199. struct map *maps__find(struct rb_root *maps, u64 ip)
  200. {
  201. struct rb_node **p = &maps->rb_node;
  202. struct rb_node *parent = NULL;
  203. struct map *m;
  204. while (*p != NULL) {
  205. parent = *p;
  206. m = rb_entry(parent, struct map, rb_node);
  207. if (ip < m->start)
  208. p = &(*p)->rb_left;
  209. else if (ip > m->end)
  210. p = &(*p)->rb_right;
  211. else
  212. return m;
  213. }
  214. return NULL;
  215. }
/*
 * Insert @map into the thread's map groups, first evicting any existing
 * maps it overlaps so the new mapping supersedes them.
 */
void thread__insert_map(struct thread *self, struct map *map)
{
	map_groups__remove_overlappings(&self->mg, map);
	map_groups__insert(&self->mg, map);
}
  221. /*
  222. * XXX This should not really _copy_ te maps, but refcount them.
  223. */
  224. static int map_groups__clone(struct map_groups *self,
  225. struct map_groups *parent, enum map_type type)
  226. {
  227. struct rb_node *nd;
  228. for (nd = rb_first(&parent->maps[type]); nd; nd = rb_next(nd)) {
  229. struct map *map = rb_entry(nd, struct map, rb_node);
  230. struct map *new = map__clone(map);
  231. if (new == NULL)
  232. return -ENOMEM;
  233. map_groups__insert(self, new);
  234. }
  235. return 0;
  236. }
  237. int thread__fork(struct thread *self, struct thread *parent)
  238. {
  239. int i;
  240. if (parent->comm_set) {
  241. if (self->comm)
  242. free(self->comm);
  243. self->comm = strdup(parent->comm);
  244. if (!self->comm)
  245. return -ENOMEM;
  246. self->comm_set = true;
  247. }
  248. for (i = 0; i < MAP__NR_TYPES; ++i)
  249. if (map_groups__clone(&self->mg, &parent->mg, i) < 0)
  250. return -ENOMEM;
  251. return 0;
  252. }
  253. size_t perf_session__fprintf(struct perf_session *self, FILE *fp)
  254. {
  255. size_t ret = 0;
  256. struct rb_node *nd;
  257. for (nd = rb_first(&self->threads); nd; nd = rb_next(nd)) {
  258. struct thread *pos = rb_entry(nd, struct thread, rb_node);
  259. ret += thread__fprintf(pos, fp);
  260. }
  261. return ret;
  262. }
  263. struct symbol *map_groups__find_symbol(struct map_groups *self,
  264. enum map_type type, u64 addr,
  265. symbol_filter_t filter)
  266. {
  267. struct map *map = map_groups__find(self, type, addr);
  268. if (map != NULL)
  269. return map__find_symbol(map, map->map_ip(map, addr), filter);
  270. return NULL;
  271. }