thread.c

#include "../perf.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include "session.h"
#include "thread.h"
#include "util.h"
#include "debug.h"

/* Skip "." and ".." directories */
static int filter(const struct dirent *dir)
{
	if (dir->d_name[0] == '.')
		return 0;
	else
		return 1;
}

int find_all_tid(int pid, pid_t **all_tid)
{
	char name[256];
	int items;
	struct dirent **namelist = NULL;
	int ret = 0;
	int i;

	sprintf(name, "/proc/%d/task", pid);
	items = scandir(name, &namelist, filter, NULL);
	if (items <= 0)
		return -ENOENT;

	*all_tid = malloc(sizeof(pid_t) * items);
	if (!*all_tid) {
		ret = -ENOMEM;
		goto failure;
	}

	for (i = 0; i < items; i++)
		(*all_tid)[i] = atoi(namelist[i]->d_name);

	ret = items;

failure:
	for (i = 0; i < items; i++)
		free(namelist[i]);
	free(namelist);

	return ret;
}
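
/*
 * Illustrative usage sketch, not part of the original thread.c: a caller
 * passes the address of a pid_t pointer, treats a positive return value as
 * the number of thread IDs filled in, and frees the array when done. The
 * dump_tids() helper below is hypothetical and only shows the calling
 * convention implied by the code above.
 */
#if 0
static void dump_tids(pid_t pid)
{
	pid_t *tids = NULL;
	int ntids, i;

	ntids = find_all_tid(pid, &tids);
	if (ntids <= 0) {
		fprintf(stderr, "no threads found for pid %d\n", pid);
		return;
	}

	for (i = 0; i < ntids; i++)
		printf("tid %d\n", tids[i]);

	free(tids);	/* the array is allocated by find_all_tid() */
}
#endif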

static struct thread *thread__new(pid_t pid)
{
	struct thread *self = zalloc(sizeof(*self));

	if (self != NULL) {
		map_groups__init(&self->mg);
		self->pid = pid;
		self->comm = malloc(32);
		if (self->comm)
			snprintf(self->comm, 32, ":%d", self->pid);
	}

	return self;
}

void thread__delete(struct thread *self)
{
	map_groups__exit(&self->mg);
	free(self->comm);
	free(self);
}

int thread__set_comm(struct thread *self, const char *comm)
{
	int err;

	if (self->comm)
		free(self->comm);
	self->comm = strdup(comm);
	err = self->comm == NULL ? -ENOMEM : 0;
	if (!err) {
		self->comm_set = true;
		map_groups__flush(&self->mg);
	}
	return err;
}

int thread__comm_len(struct thread *self)
{
	if (!self->comm_len) {
		if (!self->comm)
			return 0;
		self->comm_len = strlen(self->comm);
	}

	return self->comm_len;
}

static size_t thread__fprintf(struct thread *self, FILE *fp)
{
	return fprintf(fp, "Thread %d %s\n", self->pid, self->comm) +
	       map_groups__fprintf(&self->mg, verbose, fp);
}

struct thread *perf_session__findnew(struct perf_session *self, pid_t pid)
{
	struct rb_node **p = &self->threads.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - PID lookups come in blocks,
	 * so most of the time we don't have to look up
	 * the full rbtree:
	 */
	if (self->last_match && self->last_match->pid == pid)
		return self->last_match;

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->pid == pid) {
			self->last_match = th;
			return th;
		}

		if (pid < th->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	th = thread__new(pid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, &self->threads);
		self->last_match = th;
	}

	return th;
}

void thread__insert_map(struct thread *self, struct map *map)
{
	map_groups__fixup_overlappings(&self->mg, map, verbose, stderr);
	map_groups__insert(&self->mg, map);
}

int thread__fork(struct thread *self, struct thread *parent)
{
	int i;

	if (parent->comm_set) {
		if (self->comm)
			free(self->comm);
		self->comm = strdup(parent->comm);
		if (!self->comm)
			return -ENOMEM;
		self->comm_set = true;
	}

	for (i = 0; i < MAP__NR_TYPES; ++i)
		if (map_groups__clone(&self->mg, &parent->mg, i) < 0)
			return -ENOMEM;

	return 0;
}

size_t perf_session__fprintf(struct perf_session *self, FILE *fp)
{
	size_t ret = 0;
	struct rb_node *nd;

	for (nd = rb_first(&self->threads); nd; nd = rb_next(nd)) {
		struct thread *pos = rb_entry(nd, struct thread, rb_node);

		ret += thread__fprintf(pos, fp);
	}

	return ret;
}
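
/*
 * Illustrative usage sketch, not part of the original thread.c: how a
 * consumer of a perf_session might look up (or lazily create) a thread,
 * record its comm, and print the thread table. The example_session_use()
 * helper, the pid 1234, and the "bash" comm string are made up for the
 * example; only the three functions it calls come from this file.
 */
#if 0
static void example_session_use(struct perf_session *session)
{
	struct thread *thread = perf_session__findnew(session, 1234);

	if (thread == NULL)
		return;

	if (thread__set_comm(thread, "bash") < 0)
		fprintf(stderr, "failed to set comm\n");

	perf_session__fprintf(session, stdout);
}
#endif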