map.c 17 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765
  1. #include "symbol.h"
  2. #include <errno.h>
  3. #include <inttypes.h>
  4. #include <limits.h>
  5. #include <stdlib.h>
  6. #include <string.h>
  7. #include <stdio.h>
  8. #include <unistd.h>
  9. #include "map.h"
  10. #include "thread.h"
  11. #include "strlist.h"
/* Human-readable labels for enum map_type, used when dumping map groups. */
const char *map_type__name[MAP__NR_TYPES] = {
	[MAP__FUNCTION] = "Functions",
	[MAP__VARIABLE] = "Variables",
};
  16. static inline int is_anon_memory(const char *filename)
  17. {
  18. return strcmp(filename, "//anon") == 0;
  19. }
  20. static inline int is_no_dso_memory(const char *filename)
  21. {
  22. return !strcmp(filename, "[stack]") ||
  23. !strcmp(filename, "[vdso]") ||
  24. !strcmp(filename, "[heap]");
  25. }
  26. void map__init(struct map *self, enum map_type type,
  27. u64 start, u64 end, u64 pgoff, struct dso *dso)
  28. {
  29. self->type = type;
  30. self->start = start;
  31. self->end = end;
  32. self->pgoff = pgoff;
  33. self->dso = dso;
  34. self->map_ip = map__map_ip;
  35. self->unmap_ip = map__unmap_ip;
  36. RB_CLEAR_NODE(&self->rb_node);
  37. self->groups = NULL;
  38. self->referenced = false;
  39. self->erange_warned = false;
  40. }
/*
 * Allocate and initialize a map covering [start, start + len) for pid's
 * address space.  Anonymous mappings are redirected to the per-process
 * JIT symbol file /tmp/perf-<pid>.map; pseudo-files such as [stack] get
 * identity address translation.  Returns NULL on allocation or DSO
 * lookup failure.
 */
struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
		     u64 pgoff, u32 pid, char *filename,
		     enum map_type type)
{
	struct map *self = malloc(sizeof(*self));

	if (self != NULL) {
		char newfilename[PATH_MAX];
		struct dso *dso;
		int anon, no_dso;

		anon = is_anon_memory(filename);
		no_dso = is_no_dso_memory(filename);

		if (anon) {
			/* JIT convention: anon code symbols live here. */
			snprintf(newfilename, sizeof(newfilename), "/tmp/perf-%d.map", pid);
			filename = newfilename;
		}

		/* Find or create the dso; it is shared, not owned by us. */
		dso = __dsos__findnew(dsos__list, filename);
		if (dso == NULL)
			goto out_delete;

		map__init(self, type, start, start + len, pgoff, dso);

		if (anon || no_dso) {
			/* No ELF image to translate against: map 1:1. */
			self->map_ip = self->unmap_ip = identity__map_ip;

			/*
			 * Set memory without DSO as loaded. All map__find_*
			 * functions still return NULL, and we avoid the
			 * unnecessary map__load warning.
			 */
			if (no_dso)
				dso__set_loaded(dso, self->type);
		}
	}
	return self;
out_delete:
	free(self);
	return NULL;
}
  76. /*
  77. * Constructor variant for modules (where we know from /proc/modules where
  78. * they are loaded) and for vmlinux, where only after we load all the
  79. * symbols we'll know where it starts and ends.
  80. */
  81. struct map *map__new2(u64 start, struct dso *dso, enum map_type type)
  82. {
  83. struct map *map = calloc(1, (sizeof(*map) +
  84. (dso->kernel ? sizeof(struct kmap) : 0)));
  85. if (map != NULL) {
  86. /*
  87. * ->end will be filled after we load all the symbols
  88. */
  89. map__init(map, type, start, 0, 0, dso);
  90. }
  91. return map;
  92. }
/*
 * Free a map from map__new()/map__new2()/map__clone().  The dso it points
 * at is shared and is NOT released here.
 */
void map__delete(struct map *self)
{
	free(self);
}
  97. void map__fixup_start(struct map *self)
  98. {
  99. struct rb_root *symbols = &self->dso->symbols[self->type];
  100. struct rb_node *nd = rb_first(symbols);
  101. if (nd != NULL) {
  102. struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
  103. self->start = sym->start;
  104. }
  105. }
  106. void map__fixup_end(struct map *self)
  107. {
  108. struct rb_root *symbols = &self->dso->symbols[self->type];
  109. struct rb_node *nd = rb_last(symbols);
  110. if (nd != NULL) {
  111. struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
  112. self->end = sym->end;
  113. }
  114. }
#define DSO__DELETED "(deleted)"

/*
 * Lazily load the symbol table for @self's dso.  Returns 0 on success or
 * when already loaded, -1 when no symbols could be read (with a warning
 * printed).  On success the vmlinux map is additionally relocated.
 */
int map__load(struct map *self, symbol_filter_t filter)
{
	const char *name = self->dso->long_name;
	int nr;

	if (dso__loaded(self->dso, self->type))
		return 0;

	nr = dso__load(self->dso, self, filter);
	if (nr < 0) {
		if (self->dso->has_build_id) {
			/* Two hex digits per build-id byte plus the NUL. */
			char sbuild_id[BUILD_ID_SIZE * 2 + 1];

			build_id__sprintf(self->dso->build_id,
					  sizeof(self->dso->build_id),
					  sbuild_id);
			pr_warning("%s with build id %s not found",
				   name, sbuild_id);
		} else
			pr_warning("Failed to open %s", name);

		pr_warning(", continuing without symbols\n");
		return -1;
	} else if (nr == 0) {
#ifndef NO_LIBELF_SUPPORT
		const size_t len = strlen(name);
		/* Length of the name without the " (deleted)" suffix;
		 * only meaningful inside the guarded branch below. */
		const size_t real_len = len - sizeof(DSO__DELETED);

		if (len > sizeof(DSO__DELETED) &&
		    strcmp(name + real_len + 1, DSO__DELETED) == 0) {
			pr_warning("%.*s was updated (is prelink enabled?). "
				"Restart the long running apps that use it!\n",
				   (int)real_len, name);
		} else {
			pr_warning("no symbols found in %s, maybe install "
				   "a debug package?\n", name);
		}
#endif
		return -1;
	}
	/*
	 * Only applies to the kernel, as its symtabs aren't relative like the
	 * module ones.
	 */
	if (self->dso->kernel)
		map__reloc_vmlinux(self);

	return 0;
}
  159. struct symbol *map__find_symbol(struct map *self, u64 addr,
  160. symbol_filter_t filter)
  161. {
  162. if (map__load(self, filter) < 0)
  163. return NULL;
  164. return dso__find_symbol(self->dso, self->type, addr);
  165. }
  166. struct symbol *map__find_symbol_by_name(struct map *self, const char *name,
  167. symbol_filter_t filter)
  168. {
  169. if (map__load(self, filter) < 0)
  170. return NULL;
  171. if (!dso__sorted_by_name(self->dso, self->type))
  172. dso__sort_by_name(self->dso, self->type);
  173. return dso__find_symbol_by_name(self->dso, self->type, name);
  174. }
  175. struct map *map__clone(struct map *self)
  176. {
  177. struct map *map = malloc(sizeof(*self));
  178. if (!map)
  179. return NULL;
  180. memcpy(map, self, sizeof(*self));
  181. return map;
  182. }
  183. int map__overlap(struct map *l, struct map *r)
  184. {
  185. if (l->start > r->start) {
  186. struct map *t = l;
  187. l = r;
  188. r = t;
  189. }
  190. if (l->end > r->start)
  191. return 1;
  192. return 0;
  193. }
/* Print " start-end pgoff dso-name\n" (hex fields) for @self to @fp;
 * returns the number of bytes written. */
size_t map__fprintf(struct map *self, FILE *fp)
{
	return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n",
		       self->start, self->end, self->pgoff, self->dso->name);
}
  199. size_t map__fprintf_dsoname(struct map *map, FILE *fp)
  200. {
  201. const char *dsoname;
  202. if (map && map->dso && (map->dso->name || map->dso->long_name)) {
  203. if (symbol_conf.show_kernel_path && map->dso->long_name)
  204. dsoname = map->dso->long_name;
  205. else if (map->dso->name)
  206. dsoname = map->dso->name;
  207. } else
  208. dsoname = "[unknown]";
  209. return fprintf(fp, "%s", dsoname);
  210. }
  211. /*
  212. * objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN.
  213. * map->dso->adjust_symbols==1 for ET_EXEC-like cases.
  214. */
  215. u64 map__rip_2objdump(struct map *map, u64 rip)
  216. {
  217. u64 addr = map->dso->adjust_symbols ?
  218. map->unmap_ip(map, rip) : /* RIP -> IP */
  219. rip;
  220. return addr;
  221. }
  222. void map_groups__init(struct map_groups *mg)
  223. {
  224. int i;
  225. for (i = 0; i < MAP__NR_TYPES; ++i) {
  226. mg->maps[i] = RB_ROOT;
  227. INIT_LIST_HEAD(&mg->removed_maps[i]);
  228. }
  229. mg->machine = NULL;
  230. }
  231. static void maps__delete(struct rb_root *maps)
  232. {
  233. struct rb_node *next = rb_first(maps);
  234. while (next) {
  235. struct map *pos = rb_entry(next, struct map, rb_node);
  236. next = rb_next(&pos->rb_node);
  237. rb_erase(&pos->rb_node, maps);
  238. map__delete(pos);
  239. }
  240. }
  241. static void maps__delete_removed(struct list_head *maps)
  242. {
  243. struct map *pos, *n;
  244. list_for_each_entry_safe(pos, n, maps, node) {
  245. list_del(&pos->node);
  246. map__delete(pos);
  247. }
  248. }
  249. void map_groups__exit(struct map_groups *mg)
  250. {
  251. int i;
  252. for (i = 0; i < MAP__NR_TYPES; ++i) {
  253. maps__delete(&mg->maps[i]);
  254. maps__delete_removed(&mg->removed_maps[i]);
  255. }
  256. }
/*
 * Empty every maps tree, parking each map on the matching removed_maps
 * list instead of freeing it (used e.g. across exec, when the old address
 * space goes away but hist entries may still point at the maps).
 */
void map_groups__flush(struct map_groups *mg)
{
	int type;

	for (type = 0; type < MAP__NR_TYPES; type++) {
		struct rb_root *root = &mg->maps[type];
		struct rb_node *next = rb_first(root);

		while (next) {
			struct map *pos = rb_entry(next, struct map, rb_node);
			/* Advance before the node is erased below. */
			next = rb_next(&pos->rb_node);
			rb_erase(&pos->rb_node, root);
			/*
			 * We may have references to this map, for
			 * instance in some hist_entry instances, so
			 * just move them to a separate list.
			 */
			list_add_tail(&pos->node, &mg->removed_maps[pos->type]);
		}
	}
}
  276. struct symbol *map_groups__find_symbol(struct map_groups *mg,
  277. enum map_type type, u64 addr,
  278. struct map **mapp,
  279. symbol_filter_t filter)
  280. {
  281. struct map *map = map_groups__find(mg, type, addr);
  282. if (map != NULL) {
  283. if (mapp != NULL)
  284. *mapp = map;
  285. return map__find_symbol(map, map->map_ip(map, addr), filter);
  286. }
  287. return NULL;
  288. }
  289. struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
  290. enum map_type type,
  291. const char *name,
  292. struct map **mapp,
  293. symbol_filter_t filter)
  294. {
  295. struct rb_node *nd;
  296. for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) {
  297. struct map *pos = rb_entry(nd, struct map, rb_node);
  298. struct symbol *sym = map__find_symbol_by_name(pos, name, filter);
  299. if (sym == NULL)
  300. continue;
  301. if (mapp != NULL)
  302. *mapp = pos;
  303. return sym;
  304. }
  305. return NULL;
  306. }
  307. size_t __map_groups__fprintf_maps(struct map_groups *mg,
  308. enum map_type type, int verbose, FILE *fp)
  309. {
  310. size_t printed = fprintf(fp, "%s:\n", map_type__name[type]);
  311. struct rb_node *nd;
  312. for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) {
  313. struct map *pos = rb_entry(nd, struct map, rb_node);
  314. printed += fprintf(fp, "Map:");
  315. printed += map__fprintf(pos, fp);
  316. if (verbose > 2) {
  317. printed += dso__fprintf(pos->dso, type, fp);
  318. printed += fprintf(fp, "--\n");
  319. }
  320. }
  321. return printed;
  322. }
  323. size_t map_groups__fprintf_maps(struct map_groups *mg, int verbose, FILE *fp)
  324. {
  325. size_t printed = 0, i;
  326. for (i = 0; i < MAP__NR_TYPES; ++i)
  327. printed += __map_groups__fprintf_maps(mg, i, verbose, fp);
  328. return printed;
  329. }
  330. static size_t __map_groups__fprintf_removed_maps(struct map_groups *mg,
  331. enum map_type type,
  332. int verbose, FILE *fp)
  333. {
  334. struct map *pos;
  335. size_t printed = 0;
  336. list_for_each_entry(pos, &mg->removed_maps[type], node) {
  337. printed += fprintf(fp, "Map:");
  338. printed += map__fprintf(pos, fp);
  339. if (verbose > 1) {
  340. printed += dso__fprintf(pos->dso, type, fp);
  341. printed += fprintf(fp, "--\n");
  342. }
  343. }
  344. return printed;
  345. }
  346. static size_t map_groups__fprintf_removed_maps(struct map_groups *mg,
  347. int verbose, FILE *fp)
  348. {
  349. size_t printed = 0, i;
  350. for (i = 0; i < MAP__NR_TYPES; ++i)
  351. printed += __map_groups__fprintf_removed_maps(mg, i, verbose, fp);
  352. return printed;
  353. }
  354. size_t map_groups__fprintf(struct map_groups *mg, int verbose, FILE *fp)
  355. {
  356. size_t printed = map_groups__fprintf_maps(mg, verbose, fp);
  357. printed += fprintf(fp, "Removed maps:\n");
  358. return printed + map_groups__fprintf_removed_maps(mg, verbose, fp);
  359. }
/*
 * Make room for @map: every existing map that overlaps it is erased from
 * the tree, and the non-overlapped leading/trailing portions (if any) are
 * re-inserted as clones.  Erased maps that are still referenced are parked
 * on removed_maps instead of being freed.  Returns 0 or -ENOMEM.
 */
int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
				   int verbose, FILE *fp)
{
	struct rb_root *root = &mg->maps[map->type];
	struct rb_node *next = rb_first(root);
	int err = 0;

	while (next) {
		struct map *pos = rb_entry(next, struct map, rb_node);
		/* Advance before pos may be erased and freed below. */
		next = rb_next(&pos->rb_node);

		if (!map__overlap(pos, map))
			continue;

		if (verbose >= 2) {
			fputs("overlapping maps:\n", fp);
			map__fprintf(map, fp);
			map__fprintf(pos, fp);
		}

		rb_erase(&pos->rb_node, root);
		/*
		 * Now check if we need to create new maps for areas not
		 * overlapped by the new map:
		 */
		if (map->start > pos->start) {
			struct map *before = map__clone(pos);

			if (before == NULL) {
				err = -ENOMEM;
				/* Still dispose of pos before bailing out. */
				goto move_map;
			}

			/* Part of pos below the new map survives. */
			before->end = map->start - 1;
			map_groups__insert(mg, before);
			if (verbose >= 2)
				map__fprintf(before, fp);
		}

		if (map->end < pos->end) {
			struct map *after = map__clone(pos);

			if (after == NULL) {
				err = -ENOMEM;
				goto move_map;
			}

			/* Part of pos above the new map survives. */
			after->start = map->end + 1;
			map_groups__insert(mg, after);
			if (verbose >= 2)
				map__fprintf(after, fp);
		}
move_map:
		/*
		 * If we have references, just move them to a separate list.
		 */
		if (pos->referenced)
			list_add_tail(&pos->node, &mg->removed_maps[map->type]);
		else
			map__delete(pos);

		if (err)
			return err;
	}

	return 0;
}
/*
 * XXX This should not really _copy_ the maps, but refcount them.
 *
 * Duplicate every map of @type from @parent into @mg (e.g. when a new
 * thread inherits its parent's address space).  Returns 0 or -ENOMEM.
 * NOTE(review): on failure, maps already cloned stay inserted in @mg.
 */
int map_groups__clone(struct map_groups *mg,
		      struct map_groups *parent, enum map_type type)
{
	struct rb_node *nd;
	for (nd = rb_first(&parent->maps[type]); nd; nd = rb_next(nd)) {
		struct map *map = rb_entry(nd, struct map, rb_node);
		struct map *new = map__clone(map);
		if (new == NULL)
			return -ENOMEM;
		map_groups__insert(mg, new);
	}
	return 0;
}
/* Relocated kernel translation: ->pgoff holds the (signed) relocation
 * delta stashed by map__reloc_vmlinux(). */
static u64 map__reloc_map_ip(struct map *map, u64 ip)
{
	return ip + (s64)map->pgoff;
}
/* Inverse of map__reloc_map_ip(): undo the relocation delta. */
static u64 map__reloc_unmap_ip(struct map *map, u64 ip)
{
	return ip - (s64)map->pgoff;
}
/*
 * Switch @self to relocated translation when the reference symbol shows
 * the running kernel loaded at a different address than the vmlinux image
 * expects.  The delta is stashed in ->pgoff for the reloc translators.
 */
void map__reloc_vmlinux(struct map *self)
{
	struct kmap *kmap = map__kmap(self);
	s64 reloc;

	/* Nothing to do without a usable reference symbol. */
	if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->unrelocated_addr)
		return;

	reloc = (kmap->ref_reloc_sym->unrelocated_addr -
		 kmap->ref_reloc_sym->addr);

	if (!reloc)
		return;

	self->map_ip = map__reloc_map_ip;
	self->unmap_ip = map__reloc_unmap_ip;
	self->pgoff = reloc;
}
  454. void maps__insert(struct rb_root *maps, struct map *map)
  455. {
  456. struct rb_node **p = &maps->rb_node;
  457. struct rb_node *parent = NULL;
  458. const u64 ip = map->start;
  459. struct map *m;
  460. while (*p != NULL) {
  461. parent = *p;
  462. m = rb_entry(parent, struct map, rb_node);
  463. if (ip < m->start)
  464. p = &(*p)->rb_left;
  465. else
  466. p = &(*p)->rb_right;
  467. }
  468. rb_link_node(&map->rb_node, parent, p);
  469. rb_insert_color(&map->rb_node, maps);
  470. }
/* Unlink @map from the tree; the map itself is not freed. */
void maps__remove(struct rb_root *self, struct map *map)
{
	rb_erase(&map->rb_node, self);
}
  475. struct map *maps__find(struct rb_root *maps, u64 ip)
  476. {
  477. struct rb_node **p = &maps->rb_node;
  478. struct rb_node *parent = NULL;
  479. struct map *m;
  480. while (*p != NULL) {
  481. parent = *p;
  482. m = rb_entry(parent, struct map, rb_node);
  483. if (ip < m->start)
  484. p = &(*p)->rb_left;
  485. else if (ip > m->end)
  486. p = &(*p)->rb_right;
  487. else
  488. return m;
  489. }
  490. return NULL;
  491. }
  492. int machine__init(struct machine *self, const char *root_dir, pid_t pid)
  493. {
  494. map_groups__init(&self->kmaps);
  495. RB_CLEAR_NODE(&self->rb_node);
  496. INIT_LIST_HEAD(&self->user_dsos);
  497. INIT_LIST_HEAD(&self->kernel_dsos);
  498. self->threads = RB_ROOT;
  499. INIT_LIST_HEAD(&self->dead_threads);
  500. self->last_match = NULL;
  501. self->kmaps.machine = self;
  502. self->pid = pid;
  503. self->root_dir = strdup(root_dir);
  504. if (self->root_dir == NULL)
  505. return -ENOMEM;
  506. if (pid != HOST_KERNEL_ID) {
  507. struct thread *thread = machine__findnew_thread(self, pid);
  508. char comm[64];
  509. if (thread == NULL)
  510. return -ENOMEM;
  511. snprintf(comm, sizeof(comm), "[guest/%d]", pid);
  512. thread__set_comm(thread, comm);
  513. }
  514. return 0;
  515. }
  516. static void dsos__delete(struct list_head *self)
  517. {
  518. struct dso *pos, *n;
  519. list_for_each_entry_safe(pos, n, self, node) {
  520. list_del(&pos->node);
  521. dso__delete(pos);
  522. }
  523. }
/* Release everything @self owns (kernel maps, dso lists, root_dir);
 * the struct itself is left to the caller. */
void machine__exit(struct machine *self)
{
	map_groups__exit(&self->kmaps);
	dsos__delete(&self->user_dsos);
	dsos__delete(&self->kernel_dsos);
	free(self->root_dir);
	/* Defend against use-after-free via a stale pointer. */
	self->root_dir = NULL;
}
/* Tear down and free a heap-allocated machine (see machines__add()). */
void machine__delete(struct machine *self)
{
	machine__exit(self);
	free(self);
}
/*
 * Allocate, initialize and insert a new machine keyed by @pid into the
 * @self rb-tree.  Returns the new machine, or NULL on allocation or
 * machine__init() failure.  NOTE(review): duplicate pids are not
 * rejected here — the walk simply descends right on ties.
 */
struct machine *machines__add(struct rb_root *self, pid_t pid,
			      const char *root_dir)
{
	struct rb_node **p = &self->rb_node;
	struct rb_node *parent = NULL;
	struct machine *pos, *machine = malloc(sizeof(*machine));

	if (!machine)
		return NULL;

	if (machine__init(machine, root_dir, pid) != 0) {
		free(machine);
		return NULL;
	}

	while (*p != NULL) {
		parent = *p;
		pos = rb_entry(parent, struct machine, rb_node);
		if (pid < pos->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&machine->rb_node, parent, p);
	rb_insert_color(&machine->rb_node, self);

	return machine;
}
/*
 * Look up the machine for @pid.  If no exact match exists but a machine
 * with pid == 0 (the "default" machine) was seen on the search path, that
 * one is returned as a fallback; otherwise NULL.
 */
struct machine *machines__find(struct rb_root *self, pid_t pid)
{
	struct rb_node **p = &self->rb_node;
	struct rb_node *parent = NULL;
	struct machine *machine;
	struct machine *default_machine = NULL;

	while (*p != NULL) {
		parent = *p;
		machine = rb_entry(parent, struct machine, rb_node);
		if (pid < machine->pid)
			p = &(*p)->rb_left;
		else if (pid > machine->pid)
			p = &(*p)->rb_right;
		else
			return machine;
		/* Remember the pid-0 machine in case of no exact hit. */
		if (!machine->pid)
			default_machine = machine;
	}

	return default_machine;
}
  581. struct machine *machines__findnew(struct rb_root *self, pid_t pid)
  582. {
  583. char path[PATH_MAX];
  584. const char *root_dir = "";
  585. struct machine *machine = machines__find(self, pid);
  586. if (machine && (machine->pid == pid))
  587. goto out;
  588. if ((pid != HOST_KERNEL_ID) &&
  589. (pid != DEFAULT_GUEST_KERNEL_ID) &&
  590. (symbol_conf.guestmount)) {
  591. sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
  592. if (access(path, R_OK)) {
  593. static struct strlist *seen;
  594. if (!seen)
  595. seen = strlist__new(true, NULL);
  596. if (!strlist__has_entry(seen, path)) {
  597. pr_err("Can't access file %s\n", path);
  598. strlist__add(seen, path);
  599. }
  600. machine = NULL;
  601. goto out;
  602. }
  603. root_dir = path;
  604. }
  605. machine = machines__add(self, pid, root_dir);
  606. out:
  607. return machine;
  608. }
  609. void machines__process(struct rb_root *self, machine__process_t process, void *data)
  610. {
  611. struct rb_node *nd;
  612. for (nd = rb_first(self); nd; nd = rb_next(nd)) {
  613. struct machine *pos = rb_entry(nd, struct machine, rb_node);
  614. process(pos, data);
  615. }
  616. }
/*
 * Format the synthetic file name this machine's kernel map is reported
 * under: "[kernel.kallsyms]" for the host, "[guest.kernel.kallsyms]" for
 * the default guest, "[guest.kernel.kallsyms.<pid>]" per guest.
 * Returns @bf.
 */
char *machine__mmap_name(struct machine *self, char *bf, size_t size)
{
	if (machine__is_host(self))
		snprintf(bf, size, "[%s]", "kernel.kallsyms");
	else if (machine__is_default_guest(self))
		snprintf(bf, size, "[%s]", "guest.kernel.kallsyms");
	else
		snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms", self->pid);

	return bf;
}
  627. void machines__set_id_hdr_size(struct rb_root *machines, u16 id_hdr_size)
  628. {
  629. struct rb_node *node;
  630. struct machine *machine;
  631. for (node = rb_first(machines); node; node = rb_next(node)) {
  632. machine = rb_entry(node, struct machine, rb_node);
  633. machine->id_hdr_size = id_hdr_size;
  634. }
  635. return;
  636. }