thread.c

#include "../perf.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include "thread.h"
#include "util.h"
static struct thread *thread__new(pid_t pid)
{
	struct thread *self = malloc(sizeof(*self));

	if (self != NULL) {
		self->pid = pid;
		self->comm = malloc(32);
		if (self->comm)
			snprintf(self->comm, 32, ":%d", self->pid);
		INIT_LIST_HEAD(&self->maps);
	}

	return self;
}
int thread__set_comm(struct thread *self, const char *comm)
{
	if (self->comm)
		free(self->comm);
	self->comm = strdup(comm);

	return self->comm ? 0 : -ENOMEM;
}
static size_t thread__fprintf(struct thread *self, FILE *fp)
{
	struct map *pos;
	size_t ret = fprintf(fp, "Thread %d %s\n", self->pid, self->comm);

	list_for_each_entry(pos, &self->maps, node)
		ret += map__fprintf(pos, fp);

	return ret;
}
/* Look up the thread for @pid, creating and inserting it if it is not there yet. */
struct thread *
threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match)
{
	struct rb_node **p = &threads->rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - PID lookups come in blocks,
	 * so most of the time we don't have to look up
	 * the full rbtree:
	 */
	if (*last_match && (*last_match)->pid == pid)
		return *last_match;

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->pid == pid) {
			*last_match = th;
			return th;
		}

		if (pid < th->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	th = thread__new(pid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, threads);
		*last_match = th;
	}

	return th;
}
/* Remove any existing maps that overlap the new map, then append it. */
void thread__insert_map(struct thread *self, struct map *map)
{
	struct map *pos, *tmp;

	list_for_each_entry_safe(pos, tmp, &self->maps, node) {
		if (map__overlap(pos, map)) {
			list_del_init(&pos->node);
			/* XXX leaks dsos */
			free(pos);
		}
	}

	list_add_tail(&map->node, &self->maps);
}
/* Inherit the parent's comm and a clone of each of its maps. */
int thread__fork(struct thread *self, struct thread *parent)
{
	struct map *map;

	if (self->comm)
		free(self->comm);
	self->comm = strdup(parent->comm);
	if (!self->comm)
		return -ENOMEM;

	list_for_each_entry(map, &parent->maps, node) {
		struct map *new = map__clone(map);

		if (!new)
			return -ENOMEM;
		thread__insert_map(self, new);
	}

	return 0;
}
/* Return the map whose address range contains @ip, or NULL if none does. */
struct map *thread__find_map(struct thread *self, u64 ip)
{
	struct map *pos;

	if (self == NULL)
		return NULL;

	list_for_each_entry(pos, &self->maps, node)
		if (ip >= pos->start && ip <= pos->end)
			return pos;

	return NULL;
}
size_t threads__fprintf(FILE *fp, struct rb_root *threads)
{
	size_t ret = 0;
	struct rb_node *nd;

	for (nd = rb_first(threads); nd; nd = rb_next(nd)) {
		struct thread *pos = rb_entry(nd, struct thread, rb_node);

		ret += thread__fprintf(pos, fp);
	}

	return ret;
}
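
For reference, a minimal usage sketch (not part of thread.c): it assumes the declarations this file already relies on from thread.h (struct thread, struct rb_root, pid_t, u64); the resolve_sample() wrapper and its static state are hypothetical, not perf API.

#include "thread.h"

/* Hypothetical caller: resolve a sampled pid/ip pair to the map containing ip. */
static struct rb_root threads;		/* a zero-initialized rb_root is an empty tree */
static struct thread *last_match;	/* front-end cache reused by threads__findnew() */

static struct map *resolve_sample(pid_t pid, u64 ip)
{
	struct thread *thread = threads__findnew(pid, &threads, &last_match);

	if (thread == NULL)
		return NULL;

	return thread__find_map(thread, ip);
}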