symbol.c 56 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
0552056205720582059206020612062206320642065206620672068206920702071207220732074207520762077207820792080208120822083208420852086208720882089209020912092209320942095209620972098209921002101210221032104210521062107210821092110211121122113211421152116211721182119212021212122212321242125212621272128212921302131213221332134213521362137213821392140214121422143214421452146214721482149215021512152215321542155215621572158215921602161216221632164216521662167216821692170217121722173217421752176217721782179218021812182218321842185218621872188218921902191219221932194219521962197219821992200220122022203220422052206220722082209221022112212221322142215221622172218221922202221222222232224222522262227222822292230223122322233223422352236223722382239224022412242224322442245224622472248224922502251225222532254225522562257225822592260226122622263226422652266226722682269227022712272227322742275227622772278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452
  1. #define _GNU_SOURCE
  2. #include <ctype.h>
  3. #include <dirent.h>
  4. #include <errno.h>
  5. #include <libgen.h>
  6. #include <stdlib.h>
  7. #include <stdio.h>
  8. #include <string.h>
  9. #include <sys/types.h>
  10. #include <sys/stat.h>
  11. #include <sys/param.h>
  12. #include <fcntl.h>
  13. #include <unistd.h>
  14. #include "build-id.h"
  15. #include "debug.h"
  16. #include "symbol.h"
  17. #include "strlist.h"
  18. #include <libelf.h>
  19. #include <gelf.h>
  20. #include <elf.h>
  21. #include <limits.h>
  22. #include <sys/utsname.h>
  23. #ifndef NT_GNU_BUILD_ID
  24. #define NT_GNU_BUILD_ID 3
  25. #endif
  26. static bool dso__build_id_equal(const struct dso *self, u8 *build_id);
  27. static int elf_read_build_id(Elf *elf, void *bf, size_t size);
  28. static void dsos__add(struct list_head *head, struct dso *dso);
  29. static struct map *map__new2(u64 start, struct dso *dso, enum map_type type);
  30. static int dso__load_kernel_sym(struct dso *self, struct map *map,
  31. symbol_filter_t filter);
  32. static int dso__load_guest_kernel_sym(struct dso *self, struct map *map,
  33. symbol_filter_t filter);
  34. static int vmlinux_path__nr_entries;
  35. static char **vmlinux_path;
  36. struct symbol_conf symbol_conf = {
  37. .exclude_other = true,
  38. .use_modules = true,
  39. .try_vmlinux_path = true,
  40. };
  41. int dso__name_len(const struct dso *self)
  42. {
  43. if (verbose)
  44. return self->long_name_len;
  45. return self->short_name_len;
  46. }
  47. bool dso__loaded(const struct dso *self, enum map_type type)
  48. {
  49. return self->loaded & (1 << type);
  50. }
  51. bool dso__sorted_by_name(const struct dso *self, enum map_type type)
  52. {
  53. return self->sorted_by_name & (1 << type);
  54. }
  55. static void dso__set_sorted_by_name(struct dso *self, enum map_type type)
  56. {
  57. self->sorted_by_name |= (1 << type);
  58. }
  59. bool symbol_type__is_a(char symbol_type, enum map_type map_type)
  60. {
  61. switch (map_type) {
  62. case MAP__FUNCTION:
  63. return symbol_type == 'T' || symbol_type == 'W';
  64. case MAP__VARIABLE:
  65. return symbol_type == 'D' || symbol_type == 'd';
  66. default:
  67. return false;
  68. }
  69. }
  70. static void symbols__fixup_end(struct rb_root *self)
  71. {
  72. struct rb_node *nd, *prevnd = rb_first(self);
  73. struct symbol *curr, *prev;
  74. if (prevnd == NULL)
  75. return;
  76. curr = rb_entry(prevnd, struct symbol, rb_node);
  77. for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {
  78. prev = curr;
  79. curr = rb_entry(nd, struct symbol, rb_node);
  80. if (prev->end == prev->start)
  81. prev->end = curr->start - 1;
  82. }
  83. /* Last entry */
  84. if (curr->end == curr->start)
  85. curr->end = roundup(curr->start, 4096);
  86. }
  87. static void __map_groups__fixup_end(struct map_groups *self, enum map_type type)
  88. {
  89. struct map *prev, *curr;
  90. struct rb_node *nd, *prevnd = rb_first(&self->maps[type]);
  91. if (prevnd == NULL)
  92. return;
  93. curr = rb_entry(prevnd, struct map, rb_node);
  94. for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {
  95. prev = curr;
  96. curr = rb_entry(nd, struct map, rb_node);
  97. prev->end = curr->start - 1;
  98. }
  99. /*
  100. * We still haven't the actual symbols, so guess the
  101. * last map final address.
  102. */
  103. curr->end = ~0UL;
  104. }
  105. static void map_groups__fixup_end(struct map_groups *self)
  106. {
  107. int i;
  108. for (i = 0; i < MAP__NR_TYPES; ++i)
  109. __map_groups__fixup_end(self, i);
  110. }
  111. static struct symbol *symbol__new(u64 start, u64 len, u8 binding,
  112. const char *name)
  113. {
  114. size_t namelen = strlen(name) + 1;
  115. struct symbol *self = calloc(1, (symbol_conf.priv_size +
  116. sizeof(*self) + namelen));
  117. if (self == NULL)
  118. return NULL;
  119. if (symbol_conf.priv_size)
  120. self = ((void *)self) + symbol_conf.priv_size;
  121. self->start = start;
  122. self->end = len ? start + len - 1 : start;
  123. self->binding = binding;
  124. self->namelen = namelen - 1;
  125. pr_debug4("%s: %s %#Lx-%#Lx\n", __func__, name, start, self->end);
  126. memcpy(self->name, name, namelen);
  127. return self;
  128. }
  129. void symbol__delete(struct symbol *self)
  130. {
  131. free(((void *)self) - symbol_conf.priv_size);
  132. }
  133. static size_t symbol__fprintf(struct symbol *self, FILE *fp)
  134. {
  135. return fprintf(fp, " %llx-%llx %c %s\n",
  136. self->start, self->end,
  137. self->binding == STB_GLOBAL ? 'g' :
  138. self->binding == STB_LOCAL ? 'l' : 'w',
  139. self->name);
  140. }
  141. void dso__set_long_name(struct dso *self, char *name)
  142. {
  143. if (name == NULL)
  144. return;
  145. self->long_name = name;
  146. self->long_name_len = strlen(name);
  147. }
  148. static void dso__set_short_name(struct dso *self, const char *name)
  149. {
  150. if (name == NULL)
  151. return;
  152. self->short_name = name;
  153. self->short_name_len = strlen(name);
  154. }
  155. static void dso__set_basename(struct dso *self)
  156. {
  157. dso__set_short_name(self, basename(self->long_name));
  158. }
  159. struct dso *dso__new(const char *name)
  160. {
  161. struct dso *self = calloc(1, sizeof(*self) + strlen(name) + 1);
  162. if (self != NULL) {
  163. int i;
  164. strcpy(self->name, name);
  165. dso__set_long_name(self, self->name);
  166. dso__set_short_name(self, self->name);
  167. for (i = 0; i < MAP__NR_TYPES; ++i)
  168. self->symbols[i] = self->symbol_names[i] = RB_ROOT;
  169. self->slen_calculated = 0;
  170. self->origin = DSO__ORIG_NOT_FOUND;
  171. self->loaded = 0;
  172. self->sorted_by_name = 0;
  173. self->has_build_id = 0;
  174. self->kernel = DSO_TYPE_USER;
  175. INIT_LIST_HEAD(&self->node);
  176. }
  177. return self;
  178. }
  179. static void symbols__delete(struct rb_root *self)
  180. {
  181. struct symbol *pos;
  182. struct rb_node *next = rb_first(self);
  183. while (next) {
  184. pos = rb_entry(next, struct symbol, rb_node);
  185. next = rb_next(&pos->rb_node);
  186. rb_erase(&pos->rb_node, self);
  187. symbol__delete(pos);
  188. }
  189. }
  190. void dso__delete(struct dso *self)
  191. {
  192. int i;
  193. for (i = 0; i < MAP__NR_TYPES; ++i)
  194. symbols__delete(&self->symbols[i]);
  195. if (self->sname_alloc)
  196. free((char *)self->short_name);
  197. if (self->lname_alloc)
  198. free(self->long_name);
  199. free(self);
  200. }
  201. void dso__set_build_id(struct dso *self, void *build_id)
  202. {
  203. memcpy(self->build_id, build_id, sizeof(self->build_id));
  204. self->has_build_id = 1;
  205. }
  206. static void symbols__insert(struct rb_root *self, struct symbol *sym)
  207. {
  208. struct rb_node **p = &self->rb_node;
  209. struct rb_node *parent = NULL;
  210. const u64 ip = sym->start;
  211. struct symbol *s;
  212. while (*p != NULL) {
  213. parent = *p;
  214. s = rb_entry(parent, struct symbol, rb_node);
  215. if (ip < s->start)
  216. p = &(*p)->rb_left;
  217. else
  218. p = &(*p)->rb_right;
  219. }
  220. rb_link_node(&sym->rb_node, parent, p);
  221. rb_insert_color(&sym->rb_node, self);
  222. }
  223. static struct symbol *symbols__find(struct rb_root *self, u64 ip)
  224. {
  225. struct rb_node *n;
  226. if (self == NULL)
  227. return NULL;
  228. n = self->rb_node;
  229. while (n) {
  230. struct symbol *s = rb_entry(n, struct symbol, rb_node);
  231. if (ip < s->start)
  232. n = n->rb_left;
  233. else if (ip > s->end)
  234. n = n->rb_right;
  235. else
  236. return s;
  237. }
  238. return NULL;
  239. }
  240. struct symbol_name_rb_node {
  241. struct rb_node rb_node;
  242. struct symbol sym;
  243. };
  244. static void symbols__insert_by_name(struct rb_root *self, struct symbol *sym)
  245. {
  246. struct rb_node **p = &self->rb_node;
  247. struct rb_node *parent = NULL;
  248. struct symbol_name_rb_node *symn = ((void *)sym) - sizeof(*parent), *s;
  249. while (*p != NULL) {
  250. parent = *p;
  251. s = rb_entry(parent, struct symbol_name_rb_node, rb_node);
  252. if (strcmp(sym->name, s->sym.name) < 0)
  253. p = &(*p)->rb_left;
  254. else
  255. p = &(*p)->rb_right;
  256. }
  257. rb_link_node(&symn->rb_node, parent, p);
  258. rb_insert_color(&symn->rb_node, self);
  259. }
  260. static void symbols__sort_by_name(struct rb_root *self, struct rb_root *source)
  261. {
  262. struct rb_node *nd;
  263. for (nd = rb_first(source); nd; nd = rb_next(nd)) {
  264. struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
  265. symbols__insert_by_name(self, pos);
  266. }
  267. }
  268. static struct symbol *symbols__find_by_name(struct rb_root *self, const char *name)
  269. {
  270. struct rb_node *n;
  271. if (self == NULL)
  272. return NULL;
  273. n = self->rb_node;
  274. while (n) {
  275. struct symbol_name_rb_node *s;
  276. int cmp;
  277. s = rb_entry(n, struct symbol_name_rb_node, rb_node);
  278. cmp = strcmp(name, s->sym.name);
  279. if (cmp < 0)
  280. n = n->rb_left;
  281. else if (cmp > 0)
  282. n = n->rb_right;
  283. else
  284. return &s->sym;
  285. }
  286. return NULL;
  287. }
  288. struct symbol *dso__find_symbol(struct dso *self,
  289. enum map_type type, u64 addr)
  290. {
  291. return symbols__find(&self->symbols[type], addr);
  292. }
  293. struct symbol *dso__find_symbol_by_name(struct dso *self, enum map_type type,
  294. const char *name)
  295. {
  296. return symbols__find_by_name(&self->symbol_names[type], name);
  297. }
  298. void dso__sort_by_name(struct dso *self, enum map_type type)
  299. {
  300. dso__set_sorted_by_name(self, type);
  301. return symbols__sort_by_name(&self->symbol_names[type],
  302. &self->symbols[type]);
  303. }
  304. int build_id__sprintf(const u8 *self, int len, char *bf)
  305. {
  306. char *bid = bf;
  307. const u8 *raw = self;
  308. int i;
  309. for (i = 0; i < len; ++i) {
  310. sprintf(bid, "%02x", *raw);
  311. ++raw;
  312. bid += 2;
  313. }
  314. return raw - self;
  315. }
  316. size_t dso__fprintf_buildid(struct dso *self, FILE *fp)
  317. {
  318. char sbuild_id[BUILD_ID_SIZE * 2 + 1];
  319. build_id__sprintf(self->build_id, sizeof(self->build_id), sbuild_id);
  320. return fprintf(fp, "%s", sbuild_id);
  321. }
  322. size_t dso__fprintf(struct dso *self, enum map_type type, FILE *fp)
  323. {
  324. struct rb_node *nd;
  325. size_t ret = fprintf(fp, "dso: %s (", self->short_name);
  326. if (self->short_name != self->long_name)
  327. ret += fprintf(fp, "%s, ", self->long_name);
  328. ret += fprintf(fp, "%s, %sloaded, ", map_type__name[type],
  329. self->loaded ? "" : "NOT ");
  330. ret += dso__fprintf_buildid(self, fp);
  331. ret += fprintf(fp, ")\n");
  332. for (nd = rb_first(&self->symbols[type]); nd; nd = rb_next(nd)) {
  333. struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
  334. ret += symbol__fprintf(pos, fp);
  335. }
  336. return ret;
  337. }
  338. int kallsyms__parse(const char *filename, void *arg,
  339. int (*process_symbol)(void *arg, const char *name,
  340. char type, u64 start))
  341. {
  342. char *line = NULL;
  343. size_t n;
  344. int err = 0;
  345. FILE *file = fopen(filename, "r");
  346. if (file == NULL)
  347. goto out_failure;
  348. while (!feof(file)) {
  349. u64 start;
  350. int line_len, len;
  351. char symbol_type;
  352. char *symbol_name;
  353. line_len = getline(&line, &n, file);
  354. if (line_len < 0 || !line)
  355. break;
  356. line[--line_len] = '\0'; /* \n */
  357. len = hex2u64(line, &start);
  358. len++;
  359. if (len + 2 >= line_len)
  360. continue;
  361. symbol_type = toupper(line[len]);
  362. symbol_name = line + len + 2;
  363. err = process_symbol(arg, symbol_name, symbol_type, start);
  364. if (err)
  365. break;
  366. }
  367. free(line);
  368. fclose(file);
  369. return err;
  370. out_failure:
  371. return -1;
  372. }
  373. struct process_kallsyms_args {
  374. struct map *map;
  375. struct dso *dso;
  376. };
  377. static u8 kallsyms2elf_type(char type)
  378. {
  379. if (type == 'W')
  380. return STB_WEAK;
  381. return isupper(type) ? STB_GLOBAL : STB_LOCAL;
  382. }
  383. static int map__process_kallsym_symbol(void *arg, const char *name,
  384. char type, u64 start)
  385. {
  386. struct symbol *sym;
  387. struct process_kallsyms_args *a = arg;
  388. struct rb_root *root = &a->dso->symbols[a->map->type];
  389. if (!symbol_type__is_a(type, a->map->type))
  390. return 0;
  391. /*
  392. * Will fix up the end later, when we have all symbols sorted.
  393. */
  394. sym = symbol__new(start, 0, kallsyms2elf_type(type), name);
  395. if (sym == NULL)
  396. return -ENOMEM;
  397. /*
  398. * We will pass the symbols to the filter later, in
  399. * map__split_kallsyms, when we have split the maps per module
  400. */
  401. symbols__insert(root, sym);
  402. return 0;
  403. }
  404. /*
  405. * Loads the function entries in /proc/kallsyms into kernel_map->dso,
  406. * so that we can in the next step set the symbol ->end address and then
  407. * call kernel_maps__split_kallsyms.
  408. */
  409. static int dso__load_all_kallsyms(struct dso *self, const char *filename,
  410. struct map *map)
  411. {
  412. struct process_kallsyms_args args = { .map = map, .dso = self, };
  413. return kallsyms__parse(filename, &args, map__process_kallsym_symbol);
  414. }
  415. /*
  416. * Split the symbols into maps, making sure there are no overlaps, i.e. the
  417. * kernel range is broken in several maps, named [kernel].N, as we don't have
  418. * the original ELF section names vmlinux have.
  419. */
  420. static int dso__split_kallsyms(struct dso *self, struct map *map,
  421. symbol_filter_t filter)
  422. {
  423. struct map_groups *kmaps = map__kmap(map)->kmaps;
  424. struct machine *machine = kmaps->machine;
  425. struct map *curr_map = map;
  426. struct symbol *pos;
  427. int count = 0;
  428. struct rb_root *root = &self->symbols[map->type];
  429. struct rb_node *next = rb_first(root);
  430. int kernel_range = 0;
  431. while (next) {
  432. char *module;
  433. pos = rb_entry(next, struct symbol, rb_node);
  434. next = rb_next(&pos->rb_node);
  435. module = strchr(pos->name, '\t');
  436. if (module) {
  437. if (!symbol_conf.use_modules)
  438. goto discard_symbol;
  439. *module++ = '\0';
  440. if (strcmp(curr_map->dso->short_name, module)) {
  441. if (curr_map != map &&
  442. self->kernel == DSO_TYPE_GUEST_KERNEL &&
  443. machine__is_default_guest(machine)) {
  444. /*
  445. * We assume all symbols of a module are
  446. * continuous in * kallsyms, so curr_map
  447. * points to a module and all its
  448. * symbols are in its kmap. Mark it as
  449. * loaded.
  450. */
  451. dso__set_loaded(curr_map->dso,
  452. curr_map->type);
  453. }
  454. curr_map = map_groups__find_by_name(kmaps,
  455. map->type, module);
  456. if (curr_map == NULL) {
  457. pr_debug("%s/proc/{kallsyms,modules} "
  458. "inconsistency while looking "
  459. "for \"%s\" module!\n",
  460. machine->root_dir, module);
  461. curr_map = map;
  462. goto discard_symbol;
  463. }
  464. if (curr_map->dso->loaded &&
  465. !machine__is_default_guest(machine))
  466. goto discard_symbol;
  467. }
  468. /*
  469. * So that we look just like we get from .ko files,
  470. * i.e. not prelinked, relative to map->start.
  471. */
  472. pos->start = curr_map->map_ip(curr_map, pos->start);
  473. pos->end = curr_map->map_ip(curr_map, pos->end);
  474. } else if (curr_map != map) {
  475. char dso_name[PATH_MAX];
  476. struct dso *dso;
  477. if (self->kernel == DSO_TYPE_GUEST_KERNEL)
  478. snprintf(dso_name, sizeof(dso_name),
  479. "[guest.kernel].%d",
  480. kernel_range++);
  481. else
  482. snprintf(dso_name, sizeof(dso_name),
  483. "[kernel].%d",
  484. kernel_range++);
  485. dso = dso__new(dso_name);
  486. if (dso == NULL)
  487. return -1;
  488. dso->kernel = self->kernel;
  489. curr_map = map__new2(pos->start, dso, map->type);
  490. if (curr_map == NULL) {
  491. dso__delete(dso);
  492. return -1;
  493. }
  494. curr_map->map_ip = curr_map->unmap_ip = identity__map_ip;
  495. map_groups__insert(kmaps, curr_map);
  496. ++kernel_range;
  497. }
  498. if (filter && filter(curr_map, pos)) {
  499. discard_symbol: rb_erase(&pos->rb_node, root);
  500. symbol__delete(pos);
  501. } else {
  502. if (curr_map != map) {
  503. rb_erase(&pos->rb_node, root);
  504. symbols__insert(&curr_map->dso->symbols[curr_map->type], pos);
  505. }
  506. count++;
  507. }
  508. }
  509. if (curr_map != map &&
  510. self->kernel == DSO_TYPE_GUEST_KERNEL &&
  511. machine__is_default_guest(kmaps->machine)) {
  512. dso__set_loaded(curr_map->dso, curr_map->type);
  513. }
  514. return count;
  515. }
  516. int dso__load_kallsyms(struct dso *self, const char *filename,
  517. struct map *map, symbol_filter_t filter)
  518. {
  519. if (dso__load_all_kallsyms(self, filename, map) < 0)
  520. return -1;
  521. symbols__fixup_end(&self->symbols[map->type]);
  522. if (self->kernel == DSO_TYPE_GUEST_KERNEL)
  523. self->origin = DSO__ORIG_GUEST_KERNEL;
  524. else
  525. self->origin = DSO__ORIG_KERNEL;
  526. return dso__split_kallsyms(self, map, filter);
  527. }
  528. static int dso__load_perf_map(struct dso *self, struct map *map,
  529. symbol_filter_t filter)
  530. {
  531. char *line = NULL;
  532. size_t n;
  533. FILE *file;
  534. int nr_syms = 0;
  535. file = fopen(self->long_name, "r");
  536. if (file == NULL)
  537. goto out_failure;
  538. while (!feof(file)) {
  539. u64 start, size;
  540. struct symbol *sym;
  541. int line_len, len;
  542. line_len = getline(&line, &n, file);
  543. if (line_len < 0)
  544. break;
  545. if (!line)
  546. goto out_failure;
  547. line[--line_len] = '\0'; /* \n */
  548. len = hex2u64(line, &start);
  549. len++;
  550. if (len + 2 >= line_len)
  551. continue;
  552. len += hex2u64(line + len, &size);
  553. len++;
  554. if (len + 2 >= line_len)
  555. continue;
  556. sym = symbol__new(start, size, STB_GLOBAL, line + len);
  557. if (sym == NULL)
  558. goto out_delete_line;
  559. if (filter && filter(map, sym))
  560. symbol__delete(sym);
  561. else {
  562. symbols__insert(&self->symbols[map->type], sym);
  563. nr_syms++;
  564. }
  565. }
  566. free(line);
  567. fclose(file);
  568. return nr_syms;
  569. out_delete_line:
  570. free(line);
  571. out_failure:
  572. return -1;
  573. }
  574. /**
  575. * elf_symtab__for_each_symbol - iterate thru all the symbols
  576. *
  577. * @self: struct elf_symtab instance to iterate
  578. * @idx: uint32_t idx
  579. * @sym: GElf_Sym iterator
  580. */
  581. #define elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) \
  582. for (idx = 0, gelf_getsym(syms, idx, &sym);\
  583. idx < nr_syms; \
  584. idx++, gelf_getsym(syms, idx, &sym))
  585. static inline uint8_t elf_sym__type(const GElf_Sym *sym)
  586. {
  587. return GELF_ST_TYPE(sym->st_info);
  588. }
  589. static inline int elf_sym__is_function(const GElf_Sym *sym)
  590. {
  591. return elf_sym__type(sym) == STT_FUNC &&
  592. sym->st_name != 0 &&
  593. sym->st_shndx != SHN_UNDEF;
  594. }
  595. static inline bool elf_sym__is_object(const GElf_Sym *sym)
  596. {
  597. return elf_sym__type(sym) == STT_OBJECT &&
  598. sym->st_name != 0 &&
  599. sym->st_shndx != SHN_UNDEF;
  600. }
  601. static inline int elf_sym__is_label(const GElf_Sym *sym)
  602. {
  603. return elf_sym__type(sym) == STT_NOTYPE &&
  604. sym->st_name != 0 &&
  605. sym->st_shndx != SHN_UNDEF &&
  606. sym->st_shndx != SHN_ABS;
  607. }
  608. static inline const char *elf_sec__name(const GElf_Shdr *shdr,
  609. const Elf_Data *secstrs)
  610. {
  611. return secstrs->d_buf + shdr->sh_name;
  612. }
  613. static inline int elf_sec__is_text(const GElf_Shdr *shdr,
  614. const Elf_Data *secstrs)
  615. {
  616. return strstr(elf_sec__name(shdr, secstrs), "text") != NULL;
  617. }
  618. static inline bool elf_sec__is_data(const GElf_Shdr *shdr,
  619. const Elf_Data *secstrs)
  620. {
  621. return strstr(elf_sec__name(shdr, secstrs), "data") != NULL;
  622. }
  623. static inline const char *elf_sym__name(const GElf_Sym *sym,
  624. const Elf_Data *symstrs)
  625. {
  626. return symstrs->d_buf + sym->st_name;
  627. }
  628. static Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
  629. GElf_Shdr *shp, const char *name,
  630. size_t *idx)
  631. {
  632. Elf_Scn *sec = NULL;
  633. size_t cnt = 1;
  634. while ((sec = elf_nextscn(elf, sec)) != NULL) {
  635. char *str;
  636. gelf_getshdr(sec, shp);
  637. str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name);
  638. if (!strcmp(name, str)) {
  639. if (idx)
  640. *idx = cnt;
  641. break;
  642. }
  643. ++cnt;
  644. }
  645. return sec;
  646. }
  647. #define elf_section__for_each_rel(reldata, pos, pos_mem, idx, nr_entries) \
  648. for (idx = 0, pos = gelf_getrel(reldata, 0, &pos_mem); \
  649. idx < nr_entries; \
  650. ++idx, pos = gelf_getrel(reldata, idx, &pos_mem))
  651. #define elf_section__for_each_rela(reldata, pos, pos_mem, idx, nr_entries) \
  652. for (idx = 0, pos = gelf_getrela(reldata, 0, &pos_mem); \
  653. idx < nr_entries; \
  654. ++idx, pos = gelf_getrela(reldata, idx, &pos_mem))
  655. /*
  656. * We need to check if we have a .dynsym, so that we can handle the
  657. * .plt, synthesizing its symbols, that aren't on the symtabs (be it
  658. * .dynsym or .symtab).
  659. * And always look at the original dso, not at debuginfo packages, that
  660. * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS).
  661. */
  662. static int dso__synthesize_plt_symbols(struct dso *self, struct map *map,
  663. symbol_filter_t filter)
  664. {
  665. uint32_t nr_rel_entries, idx;
  666. GElf_Sym sym;
  667. u64 plt_offset;
  668. GElf_Shdr shdr_plt;
  669. struct symbol *f;
  670. GElf_Shdr shdr_rel_plt, shdr_dynsym;
  671. Elf_Data *reldata, *syms, *symstrs;
  672. Elf_Scn *scn_plt_rel, *scn_symstrs, *scn_dynsym;
  673. size_t dynsym_idx;
  674. GElf_Ehdr ehdr;
  675. char sympltname[1024];
  676. Elf *elf;
  677. int nr = 0, symidx, fd, err = 0;
  678. fd = open(self->long_name, O_RDONLY);
  679. if (fd < 0)
  680. goto out;
  681. elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
  682. if (elf == NULL)
  683. goto out_close;
  684. if (gelf_getehdr(elf, &ehdr) == NULL)
  685. goto out_elf_end;
  686. scn_dynsym = elf_section_by_name(elf, &ehdr, &shdr_dynsym,
  687. ".dynsym", &dynsym_idx);
  688. if (scn_dynsym == NULL)
  689. goto out_elf_end;
  690. scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
  691. ".rela.plt", NULL);
  692. if (scn_plt_rel == NULL) {
  693. scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
  694. ".rel.plt", NULL);
  695. if (scn_plt_rel == NULL)
  696. goto out_elf_end;
  697. }
  698. err = -1;
  699. if (shdr_rel_plt.sh_link != dynsym_idx)
  700. goto out_elf_end;
  701. if (elf_section_by_name(elf, &ehdr, &shdr_plt, ".plt", NULL) == NULL)
  702. goto out_elf_end;
  703. /*
  704. * Fetch the relocation section to find the idxes to the GOT
  705. * and the symbols in the .dynsym they refer to.
  706. */
  707. reldata = elf_getdata(scn_plt_rel, NULL);
  708. if (reldata == NULL)
  709. goto out_elf_end;
  710. syms = elf_getdata(scn_dynsym, NULL);
  711. if (syms == NULL)
  712. goto out_elf_end;
  713. scn_symstrs = elf_getscn(elf, shdr_dynsym.sh_link);
  714. if (scn_symstrs == NULL)
  715. goto out_elf_end;
  716. symstrs = elf_getdata(scn_symstrs, NULL);
  717. if (symstrs == NULL)
  718. goto out_elf_end;
  719. nr_rel_entries = shdr_rel_plt.sh_size / shdr_rel_plt.sh_entsize;
  720. plt_offset = shdr_plt.sh_offset;
  721. if (shdr_rel_plt.sh_type == SHT_RELA) {
  722. GElf_Rela pos_mem, *pos;
  723. elf_section__for_each_rela(reldata, pos, pos_mem, idx,
  724. nr_rel_entries) {
  725. symidx = GELF_R_SYM(pos->r_info);
  726. plt_offset += shdr_plt.sh_entsize;
  727. gelf_getsym(syms, symidx, &sym);
  728. snprintf(sympltname, sizeof(sympltname),
  729. "%s@plt", elf_sym__name(&sym, symstrs));
  730. f = symbol__new(plt_offset, shdr_plt.sh_entsize,
  731. STB_GLOBAL, sympltname);
  732. if (!f)
  733. goto out_elf_end;
  734. if (filter && filter(map, f))
  735. symbol__delete(f);
  736. else {
  737. symbols__insert(&self->symbols[map->type], f);
  738. ++nr;
  739. }
  740. }
  741. } else if (shdr_rel_plt.sh_type == SHT_REL) {
  742. GElf_Rel pos_mem, *pos;
  743. elf_section__for_each_rel(reldata, pos, pos_mem, idx,
  744. nr_rel_entries) {
  745. symidx = GELF_R_SYM(pos->r_info);
  746. plt_offset += shdr_plt.sh_entsize;
  747. gelf_getsym(syms, symidx, &sym);
  748. snprintf(sympltname, sizeof(sympltname),
  749. "%s@plt", elf_sym__name(&sym, symstrs));
  750. f = symbol__new(plt_offset, shdr_plt.sh_entsize,
  751. STB_GLOBAL, sympltname);
  752. if (!f)
  753. goto out_elf_end;
  754. if (filter && filter(map, f))
  755. symbol__delete(f);
  756. else {
  757. symbols__insert(&self->symbols[map->type], f);
  758. ++nr;
  759. }
  760. }
  761. }
  762. err = 0;
  763. out_elf_end:
  764. elf_end(elf);
  765. out_close:
  766. close(fd);
  767. if (err == 0)
  768. return nr;
  769. out:
  770. pr_debug("%s: problems reading %s PLT info.\n",
  771. __func__, self->long_name);
  772. return 0;
  773. }
  774. static bool elf_sym__is_a(GElf_Sym *self, enum map_type type)
  775. {
  776. switch (type) {
  777. case MAP__FUNCTION:
  778. return elf_sym__is_function(self);
  779. case MAP__VARIABLE:
  780. return elf_sym__is_object(self);
  781. default:
  782. return false;
  783. }
  784. }
  785. static bool elf_sec__is_a(GElf_Shdr *self, Elf_Data *secstrs, enum map_type type)
  786. {
  787. switch (type) {
  788. case MAP__FUNCTION:
  789. return elf_sec__is_text(self, secstrs);
  790. case MAP__VARIABLE:
  791. return elf_sec__is_data(self, secstrs);
  792. default:
  793. return false;
  794. }
  795. }
  796. static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr)
  797. {
  798. Elf_Scn *sec = NULL;
  799. GElf_Shdr shdr;
  800. size_t cnt = 1;
  801. while ((sec = elf_nextscn(elf, sec)) != NULL) {
  802. gelf_getshdr(sec, &shdr);
  803. if ((addr >= shdr.sh_addr) &&
  804. (addr < (shdr.sh_addr + shdr.sh_size)))
  805. return cnt;
  806. ++cnt;
  807. }
  808. return -1;
  809. }
  810. static int dso__load_sym(struct dso *self, struct map *map, const char *name,
  811. int fd, symbol_filter_t filter, int kmodule,
  812. int want_symtab)
  813. {
  814. struct kmap *kmap = self->kernel ? map__kmap(map) : NULL;
  815. struct map *curr_map = map;
  816. struct dso *curr_dso = self;
  817. Elf_Data *symstrs, *secstrs;
  818. uint32_t nr_syms;
  819. int err = -1;
  820. uint32_t idx;
  821. GElf_Ehdr ehdr;
  822. GElf_Shdr shdr, opdshdr;
  823. Elf_Data *syms, *opddata = NULL;
  824. GElf_Sym sym;
  825. Elf_Scn *sec, *sec_strndx, *opdsec;
  826. Elf *elf;
  827. int nr = 0;
  828. size_t opdidx = 0;
  829. elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
  830. if (elf == NULL) {
  831. pr_debug("%s: cannot read %s ELF file.\n", __func__, name);
  832. goto out_close;
  833. }
  834. if (gelf_getehdr(elf, &ehdr) == NULL) {
  835. pr_debug("%s: cannot get elf header.\n", __func__);
  836. goto out_elf_end;
  837. }
  838. /* Always reject images with a mismatched build-id: */
  839. if (self->has_build_id) {
  840. u8 build_id[BUILD_ID_SIZE];
  841. if (elf_read_build_id(elf, build_id,
  842. BUILD_ID_SIZE) != BUILD_ID_SIZE)
  843. goto out_elf_end;
  844. if (!dso__build_id_equal(self, build_id))
  845. goto out_elf_end;
  846. }
  847. sec = elf_section_by_name(elf, &ehdr, &shdr, ".symtab", NULL);
  848. if (sec == NULL) {
  849. if (want_symtab)
  850. goto out_elf_end;
  851. sec = elf_section_by_name(elf, &ehdr, &shdr, ".dynsym", NULL);
  852. if (sec == NULL)
  853. goto out_elf_end;
  854. }
  855. opdsec = elf_section_by_name(elf, &ehdr, &opdshdr, ".opd", &opdidx);
  856. if (opdsec)
  857. opddata = elf_rawdata(opdsec, NULL);
  858. syms = elf_getdata(sec, NULL);
  859. if (syms == NULL)
  860. goto out_elf_end;
  861. sec = elf_getscn(elf, shdr.sh_link);
  862. if (sec == NULL)
  863. goto out_elf_end;
  864. symstrs = elf_getdata(sec, NULL);
  865. if (symstrs == NULL)
  866. goto out_elf_end;
  867. sec_strndx = elf_getscn(elf, ehdr.e_shstrndx);
  868. if (sec_strndx == NULL)
  869. goto out_elf_end;
  870. secstrs = elf_getdata(sec_strndx, NULL);
  871. if (secstrs == NULL)
  872. goto out_elf_end;
  873. nr_syms = shdr.sh_size / shdr.sh_entsize;
  874. memset(&sym, 0, sizeof(sym));
  875. if (self->kernel == DSO_TYPE_USER) {
  876. self->adjust_symbols = (ehdr.e_type == ET_EXEC ||
  877. elf_section_by_name(elf, &ehdr, &shdr,
  878. ".gnu.prelink_undo",
  879. NULL) != NULL);
  880. } else self->adjust_symbols = 0;
  881. elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
  882. struct symbol *f;
  883. const char *elf_name = elf_sym__name(&sym, symstrs);
  884. char *demangled = NULL;
  885. int is_label = elf_sym__is_label(&sym);
  886. const char *section_name;
  887. if (kmap && kmap->ref_reloc_sym && kmap->ref_reloc_sym->name &&
  888. strcmp(elf_name, kmap->ref_reloc_sym->name) == 0)
  889. kmap->ref_reloc_sym->unrelocated_addr = sym.st_value;
  890. if (!is_label && !elf_sym__is_a(&sym, map->type))
  891. continue;
  892. /* Reject ARM ELF "mapping symbols": these aren't unique and
  893. * don't identify functions, so will confuse the profile
  894. * output: */
  895. if (ehdr.e_machine == EM_ARM) {
  896. if (!strcmp(elf_name, "$a") ||
  897. !strcmp(elf_name, "$d") ||
  898. !strcmp(elf_name, "$t"))
  899. continue;
  900. }
  901. if (opdsec && sym.st_shndx == opdidx) {
  902. u32 offset = sym.st_value - opdshdr.sh_addr;
  903. u64 *opd = opddata->d_buf + offset;
  904. sym.st_value = *opd;
  905. sym.st_shndx = elf_addr_to_index(elf, sym.st_value);
  906. }
  907. sec = elf_getscn(elf, sym.st_shndx);
  908. if (!sec)
  909. goto out_elf_end;
  910. gelf_getshdr(sec, &shdr);
  911. if (is_label && !elf_sec__is_a(&shdr, secstrs, map->type))
  912. continue;
  913. section_name = elf_sec__name(&shdr, secstrs);
  914. if (self->kernel != DSO_TYPE_USER || kmodule) {
  915. char dso_name[PATH_MAX];
  916. if (strcmp(section_name,
  917. (curr_dso->short_name +
  918. self->short_name_len)) == 0)
  919. goto new_symbol;
  920. if (strcmp(section_name, ".text") == 0) {
  921. curr_map = map;
  922. curr_dso = self;
  923. goto new_symbol;
  924. }
  925. snprintf(dso_name, sizeof(dso_name),
  926. "%s%s", self->short_name, section_name);
  927. curr_map = map_groups__find_by_name(kmap->kmaps, map->type, dso_name);
  928. if (curr_map == NULL) {
  929. u64 start = sym.st_value;
  930. if (kmodule)
  931. start += map->start + shdr.sh_offset;
  932. curr_dso = dso__new(dso_name);
  933. if (curr_dso == NULL)
  934. goto out_elf_end;
  935. curr_dso->kernel = self->kernel;
  936. curr_map = map__new2(start, curr_dso,
  937. map->type);
  938. if (curr_map == NULL) {
  939. dso__delete(curr_dso);
  940. goto out_elf_end;
  941. }
  942. curr_map->map_ip = identity__map_ip;
  943. curr_map->unmap_ip = identity__map_ip;
  944. curr_dso->origin = self->origin;
  945. map_groups__insert(kmap->kmaps, curr_map);
  946. dsos__add(&self->node, curr_dso);
  947. dso__set_loaded(curr_dso, map->type);
  948. } else
  949. curr_dso = curr_map->dso;
  950. goto new_symbol;
  951. }
  952. if (curr_dso->adjust_symbols) {
  953. pr_debug4("%s: adjusting symbol: st_value: %#Lx "
  954. "sh_addr: %#Lx sh_offset: %#Lx\n", __func__,
  955. (u64)sym.st_value, (u64)shdr.sh_addr,
  956. (u64)shdr.sh_offset);
  957. sym.st_value -= shdr.sh_addr - shdr.sh_offset;
  958. }
  959. /*
  960. * We need to figure out if the object was created from C++ sources
  961. * DWARF DW_compile_unit has this, but we don't always have access
  962. * to it...
  963. */
  964. demangled = bfd_demangle(NULL, elf_name, DMGL_PARAMS | DMGL_ANSI);
  965. if (demangled != NULL)
  966. elf_name = demangled;
  967. new_symbol:
  968. f = symbol__new(sym.st_value, sym.st_size,
  969. GELF_ST_BIND(sym.st_info), elf_name);
  970. free(demangled);
  971. if (!f)
  972. goto out_elf_end;
  973. if (filter && filter(curr_map, f))
  974. symbol__delete(f);
  975. else {
  976. symbols__insert(&curr_dso->symbols[curr_map->type], f);
  977. nr++;
  978. }
  979. }
  980. /*
  981. * For misannotated, zeroed, ASM function sizes.
  982. */
  983. if (nr > 0) {
  984. symbols__fixup_end(&self->symbols[map->type]);
  985. if (kmap) {
  986. /*
  987. * We need to fixup this here too because we create new
  988. * maps here, for things like vsyscall sections.
  989. */
  990. __map_groups__fixup_end(kmap->kmaps, map->type);
  991. }
  992. }
  993. err = nr;
  994. out_elf_end:
  995. elf_end(elf);
  996. out_close:
  997. return err;
  998. }
  999. static bool dso__build_id_equal(const struct dso *self, u8 *build_id)
  1000. {
  1001. return memcmp(self->build_id, build_id, sizeof(self->build_id)) == 0;
  1002. }
  1003. bool __dsos__read_build_ids(struct list_head *head, bool with_hits)
  1004. {
  1005. bool have_build_id = false;
  1006. struct dso *pos;
  1007. list_for_each_entry(pos, head, node) {
  1008. if (with_hits && !pos->hit)
  1009. continue;
  1010. if (pos->has_build_id) {
  1011. have_build_id = true;
  1012. continue;
  1013. }
  1014. if (filename__read_build_id(pos->long_name, pos->build_id,
  1015. sizeof(pos->build_id)) > 0) {
  1016. have_build_id = true;
  1017. pos->has_build_id = true;
  1018. }
  1019. }
  1020. return have_build_id;
  1021. }
  1022. /*
  1023. * Align offset to 4 bytes as needed for note name and descriptor data.
  1024. */
  1025. #define NOTE_ALIGN(n) (((n) + 3) & -4U)
  1026. static int elf_read_build_id(Elf *elf, void *bf, size_t size)
  1027. {
  1028. int err = -1;
  1029. GElf_Ehdr ehdr;
  1030. GElf_Shdr shdr;
  1031. Elf_Data *data;
  1032. Elf_Scn *sec;
  1033. Elf_Kind ek;
  1034. void *ptr;
  1035. if (size < BUILD_ID_SIZE)
  1036. goto out;
  1037. ek = elf_kind(elf);
  1038. if (ek != ELF_K_ELF)
  1039. goto out;
  1040. if (gelf_getehdr(elf, &ehdr) == NULL) {
  1041. pr_err("%s: cannot get elf header.\n", __func__);
  1042. goto out;
  1043. }
  1044. sec = elf_section_by_name(elf, &ehdr, &shdr,
  1045. ".note.gnu.build-id", NULL);
  1046. if (sec == NULL) {
  1047. sec = elf_section_by_name(elf, &ehdr, &shdr,
  1048. ".notes", NULL);
  1049. if (sec == NULL)
  1050. goto out;
  1051. }
  1052. data = elf_getdata(sec, NULL);
  1053. if (data == NULL)
  1054. goto out;
  1055. ptr = data->d_buf;
  1056. while (ptr < (data->d_buf + data->d_size)) {
  1057. GElf_Nhdr *nhdr = ptr;
  1058. int namesz = NOTE_ALIGN(nhdr->n_namesz),
  1059. descsz = NOTE_ALIGN(nhdr->n_descsz);
  1060. const char *name;
  1061. ptr += sizeof(*nhdr);
  1062. name = ptr;
  1063. ptr += namesz;
  1064. if (nhdr->n_type == NT_GNU_BUILD_ID &&
  1065. nhdr->n_namesz == sizeof("GNU")) {
  1066. if (memcmp(name, "GNU", sizeof("GNU")) == 0) {
  1067. memcpy(bf, ptr, BUILD_ID_SIZE);
  1068. err = BUILD_ID_SIZE;
  1069. break;
  1070. }
  1071. }
  1072. ptr += descsz;
  1073. }
  1074. out:
  1075. return err;
  1076. }
  1077. int filename__read_build_id(const char *filename, void *bf, size_t size)
  1078. {
  1079. int fd, err = -1;
  1080. Elf *elf;
  1081. if (size < BUILD_ID_SIZE)
  1082. goto out;
  1083. fd = open(filename, O_RDONLY);
  1084. if (fd < 0)
  1085. goto out;
  1086. elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
  1087. if (elf == NULL) {
  1088. pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
  1089. goto out_close;
  1090. }
  1091. err = elf_read_build_id(elf, bf, size);
  1092. elf_end(elf);
  1093. out_close:
  1094. close(fd);
  1095. out:
  1096. return err;
  1097. }
  1098. int sysfs__read_build_id(const char *filename, void *build_id, size_t size)
  1099. {
  1100. int fd, err = -1;
  1101. if (size < BUILD_ID_SIZE)
  1102. goto out;
  1103. fd = open(filename, O_RDONLY);
  1104. if (fd < 0)
  1105. goto out;
  1106. while (1) {
  1107. char bf[BUFSIZ];
  1108. GElf_Nhdr nhdr;
  1109. int namesz, descsz;
  1110. if (read(fd, &nhdr, sizeof(nhdr)) != sizeof(nhdr))
  1111. break;
  1112. namesz = NOTE_ALIGN(nhdr.n_namesz);
  1113. descsz = NOTE_ALIGN(nhdr.n_descsz);
  1114. if (nhdr.n_type == NT_GNU_BUILD_ID &&
  1115. nhdr.n_namesz == sizeof("GNU")) {
  1116. if (read(fd, bf, namesz) != namesz)
  1117. break;
  1118. if (memcmp(bf, "GNU", sizeof("GNU")) == 0) {
  1119. if (read(fd, build_id,
  1120. BUILD_ID_SIZE) == BUILD_ID_SIZE) {
  1121. err = 0;
  1122. break;
  1123. }
  1124. } else if (read(fd, bf, descsz) != descsz)
  1125. break;
  1126. } else {
  1127. int n = namesz + descsz;
  1128. if (read(fd, bf, n) != n)
  1129. break;
  1130. }
  1131. }
  1132. close(fd);
  1133. out:
  1134. return err;
  1135. }
  1136. char dso__symtab_origin(const struct dso *self)
  1137. {
  1138. static const char origin[] = {
  1139. [DSO__ORIG_KERNEL] = 'k',
  1140. [DSO__ORIG_JAVA_JIT] = 'j',
  1141. [DSO__ORIG_BUILD_ID_CACHE] = 'B',
  1142. [DSO__ORIG_FEDORA] = 'f',
  1143. [DSO__ORIG_UBUNTU] = 'u',
  1144. [DSO__ORIG_BUILDID] = 'b',
  1145. [DSO__ORIG_DSO] = 'd',
  1146. [DSO__ORIG_KMODULE] = 'K',
  1147. [DSO__ORIG_GUEST_KERNEL] = 'g',
  1148. [DSO__ORIG_GUEST_KMODULE] = 'G',
  1149. };
  1150. if (self == NULL || self->origin == DSO__ORIG_NOT_FOUND)
  1151. return '!';
  1152. return origin[self->origin];
  1153. }
  1154. int dso__load(struct dso *self, struct map *map, symbol_filter_t filter)
  1155. {
  1156. int size = PATH_MAX;
  1157. char *name;
  1158. int ret = -1;
  1159. int fd;
  1160. struct machine *machine;
  1161. const char *root_dir;
  1162. int want_symtab;
  1163. dso__set_loaded(self, map->type);
  1164. if (self->kernel == DSO_TYPE_KERNEL)
  1165. return dso__load_kernel_sym(self, map, filter);
  1166. else if (self->kernel == DSO_TYPE_GUEST_KERNEL)
  1167. return dso__load_guest_kernel_sym(self, map, filter);
  1168. if (map->groups && map->groups->machine)
  1169. machine = map->groups->machine;
  1170. else
  1171. machine = NULL;
  1172. name = malloc(size);
  1173. if (!name)
  1174. return -1;
  1175. self->adjust_symbols = 0;
  1176. if (strncmp(self->name, "/tmp/perf-", 10) == 0) {
  1177. ret = dso__load_perf_map(self, map, filter);
  1178. self->origin = ret > 0 ? DSO__ORIG_JAVA_JIT :
  1179. DSO__ORIG_NOT_FOUND;
  1180. return ret;
  1181. }
  1182. /* Iterate over candidate debug images.
  1183. * On the first pass, only load images if they have a full symtab.
  1184. * Failing that, do a second pass where we accept .dynsym also
  1185. */
  1186. for (self->origin = DSO__ORIG_BUILD_ID_CACHE, want_symtab = 1;
  1187. self->origin != DSO__ORIG_NOT_FOUND;
  1188. self->origin++) {
  1189. switch (self->origin) {
  1190. case DSO__ORIG_BUILD_ID_CACHE:
  1191. if (dso__build_id_filename(self, name, size) == NULL)
  1192. continue;
  1193. break;
  1194. case DSO__ORIG_FEDORA:
  1195. snprintf(name, size, "/usr/lib/debug%s.debug",
  1196. self->long_name);
  1197. break;
  1198. case DSO__ORIG_UBUNTU:
  1199. snprintf(name, size, "/usr/lib/debug%s",
  1200. self->long_name);
  1201. break;
  1202. case DSO__ORIG_BUILDID: {
  1203. char build_id_hex[BUILD_ID_SIZE * 2 + 1];
  1204. if (!self->has_build_id)
  1205. continue;
  1206. build_id__sprintf(self->build_id,
  1207. sizeof(self->build_id),
  1208. build_id_hex);
  1209. snprintf(name, size,
  1210. "/usr/lib/debug/.build-id/%.2s/%s.debug",
  1211. build_id_hex, build_id_hex + 2);
  1212. }
  1213. break;
  1214. case DSO__ORIG_DSO:
  1215. snprintf(name, size, "%s", self->long_name);
  1216. break;
  1217. case DSO__ORIG_GUEST_KMODULE:
  1218. if (map->groups && map->groups->machine)
  1219. root_dir = map->groups->machine->root_dir;
  1220. else
  1221. root_dir = "";
  1222. snprintf(name, size, "%s%s", root_dir, self->long_name);
  1223. break;
  1224. default:
  1225. /*
  1226. * If we wanted a full symtab but no image had one,
  1227. * relax our requirements and repeat the search.
  1228. */
  1229. if (want_symtab) {
  1230. want_symtab = 0;
  1231. self->origin = DSO__ORIG_BUILD_ID_CACHE;
  1232. } else
  1233. continue;
  1234. }
  1235. /* Name is now the name of the next image to try */
  1236. fd = open(name, O_RDONLY);
  1237. if (fd < 0)
  1238. continue;
  1239. ret = dso__load_sym(self, map, name, fd, filter, 0,
  1240. want_symtab);
  1241. close(fd);
  1242. /*
  1243. * Some people seem to have debuginfo files _WITHOUT_ debug
  1244. * info!?!?
  1245. */
  1246. if (!ret)
  1247. continue;
  1248. if (ret > 0) {
  1249. int nr_plt = dso__synthesize_plt_symbols(self, map, filter);
  1250. if (nr_plt > 0)
  1251. ret += nr_plt;
  1252. break;
  1253. }
  1254. }
  1255. free(name);
  1256. if (ret < 0 && strstr(self->name, " (deleted)") != NULL)
  1257. return 0;
  1258. return ret;
  1259. }
  1260. struct map *map_groups__find_by_name(struct map_groups *self,
  1261. enum map_type type, const char *name)
  1262. {
  1263. struct rb_node *nd;
  1264. for (nd = rb_first(&self->maps[type]); nd; nd = rb_next(nd)) {
  1265. struct map *map = rb_entry(nd, struct map, rb_node);
  1266. if (map->dso && strcmp(map->dso->short_name, name) == 0)
  1267. return map;
  1268. }
  1269. return NULL;
  1270. }
  1271. static int dso__kernel_module_get_build_id(struct dso *self,
  1272. const char *root_dir)
  1273. {
  1274. char filename[PATH_MAX];
  1275. /*
  1276. * kernel module short names are of the form "[module]" and
  1277. * we need just "module" here.
  1278. */
  1279. const char *name = self->short_name + 1;
  1280. snprintf(filename, sizeof(filename),
  1281. "%s/sys/module/%.*s/notes/.note.gnu.build-id",
  1282. root_dir, (int)strlen(name) - 1, name);
  1283. if (sysfs__read_build_id(filename, self->build_id,
  1284. sizeof(self->build_id)) == 0)
  1285. self->has_build_id = true;
  1286. return 0;
  1287. }
  1288. static int map_groups__set_modules_path_dir(struct map_groups *self,
  1289. const char *dir_name)
  1290. {
  1291. struct dirent *dent;
  1292. DIR *dir = opendir(dir_name);
  1293. int ret = 0;
  1294. if (!dir) {
  1295. pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
  1296. return -1;
  1297. }
  1298. while ((dent = readdir(dir)) != NULL) {
  1299. char path[PATH_MAX];
  1300. struct stat st;
  1301. /*sshfs might return bad dent->d_type, so we have to stat*/
  1302. sprintf(path, "%s/%s", dir_name, dent->d_name);
  1303. if (stat(path, &st))
  1304. continue;
  1305. if (S_ISDIR(st.st_mode)) {
  1306. if (!strcmp(dent->d_name, ".") ||
  1307. !strcmp(dent->d_name, ".."))
  1308. continue;
  1309. snprintf(path, sizeof(path), "%s/%s",
  1310. dir_name, dent->d_name);
  1311. ret = map_groups__set_modules_path_dir(self, path);
  1312. if (ret < 0)
  1313. goto out;
  1314. } else {
  1315. char *dot = strrchr(dent->d_name, '.'),
  1316. dso_name[PATH_MAX];
  1317. struct map *map;
  1318. char *long_name;
  1319. if (dot == NULL || strcmp(dot, ".ko"))
  1320. continue;
  1321. snprintf(dso_name, sizeof(dso_name), "[%.*s]",
  1322. (int)(dot - dent->d_name), dent->d_name);
  1323. strxfrchar(dso_name, '-', '_');
  1324. map = map_groups__find_by_name(self, MAP__FUNCTION, dso_name);
  1325. if (map == NULL)
  1326. continue;
  1327. snprintf(path, sizeof(path), "%s/%s",
  1328. dir_name, dent->d_name);
  1329. long_name = strdup(path);
  1330. if (long_name == NULL) {
  1331. ret = -1;
  1332. goto out;
  1333. }
  1334. dso__set_long_name(map->dso, long_name);
  1335. map->dso->lname_alloc = 1;
  1336. dso__kernel_module_get_build_id(map->dso, "");
  1337. }
  1338. }
  1339. out:
  1340. closedir(dir);
  1341. return ret;
  1342. }
  1343. static char *get_kernel_version(const char *root_dir)
  1344. {
  1345. char version[PATH_MAX];
  1346. FILE *file;
  1347. char *name, *tmp;
  1348. const char *prefix = "Linux version ";
  1349. sprintf(version, "%s/proc/version", root_dir);
  1350. file = fopen(version, "r");
  1351. if (!file)
  1352. return NULL;
  1353. version[0] = '\0';
  1354. tmp = fgets(version, sizeof(version), file);
  1355. fclose(file);
  1356. name = strstr(version, prefix);
  1357. if (!name)
  1358. return NULL;
  1359. name += strlen(prefix);
  1360. tmp = strchr(name, ' ');
  1361. if (tmp)
  1362. *tmp = '\0';
  1363. return strdup(name);
  1364. }
  1365. static int machine__set_modules_path(struct machine *self)
  1366. {
  1367. char *version;
  1368. char modules_path[PATH_MAX];
  1369. version = get_kernel_version(self->root_dir);
  1370. if (!version)
  1371. return -1;
  1372. snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s/kernel",
  1373. self->root_dir, version);
  1374. free(version);
  1375. return map_groups__set_modules_path_dir(&self->kmaps, modules_path);
  1376. }
  1377. /*
  1378. * Constructor variant for modules (where we know from /proc/modules where
  1379. * they are loaded) and for vmlinux, where only after we load all the
  1380. * symbols we'll know where it starts and ends.
  1381. */
  1382. static struct map *map__new2(u64 start, struct dso *dso, enum map_type type)
  1383. {
  1384. struct map *self = calloc(1, (sizeof(*self) +
  1385. (dso->kernel ? sizeof(struct kmap) : 0)));
  1386. if (self != NULL) {
  1387. /*
  1388. * ->end will be filled after we load all the symbols
  1389. */
  1390. map__init(self, type, start, 0, 0, dso);
  1391. }
  1392. return self;
  1393. }
  1394. struct map *machine__new_module(struct machine *self, u64 start,
  1395. const char *filename)
  1396. {
  1397. struct map *map;
  1398. struct dso *dso = __dsos__findnew(&self->kernel_dsos, filename);
  1399. if (dso == NULL)
  1400. return NULL;
  1401. map = map__new2(start, dso, MAP__FUNCTION);
  1402. if (map == NULL)
  1403. return NULL;
  1404. if (machine__is_host(self))
  1405. dso->origin = DSO__ORIG_KMODULE;
  1406. else
  1407. dso->origin = DSO__ORIG_GUEST_KMODULE;
  1408. map_groups__insert(&self->kmaps, map);
  1409. return map;
  1410. }
  1411. static int machine__create_modules(struct machine *self)
  1412. {
  1413. char *line = NULL;
  1414. size_t n;
  1415. FILE *file;
  1416. struct map *map;
  1417. const char *modules;
  1418. char path[PATH_MAX];
  1419. if (machine__is_default_guest(self))
  1420. modules = symbol_conf.default_guest_modules;
  1421. else {
  1422. sprintf(path, "%s/proc/modules", self->root_dir);
  1423. modules = path;
  1424. }
  1425. file = fopen(modules, "r");
  1426. if (file == NULL)
  1427. return -1;
  1428. while (!feof(file)) {
  1429. char name[PATH_MAX];
  1430. u64 start;
  1431. char *sep;
  1432. int line_len;
  1433. line_len = getline(&line, &n, file);
  1434. if (line_len < 0)
  1435. break;
  1436. if (!line)
  1437. goto out_failure;
  1438. line[--line_len] = '\0'; /* \n */
  1439. sep = strrchr(line, 'x');
  1440. if (sep == NULL)
  1441. continue;
  1442. hex2u64(sep + 1, &start);
  1443. sep = strchr(line, ' ');
  1444. if (sep == NULL)
  1445. continue;
  1446. *sep = '\0';
  1447. snprintf(name, sizeof(name), "[%s]", line);
  1448. map = machine__new_module(self, start, name);
  1449. if (map == NULL)
  1450. goto out_delete_line;
  1451. dso__kernel_module_get_build_id(map->dso, self->root_dir);
  1452. }
  1453. free(line);
  1454. fclose(file);
  1455. return machine__set_modules_path(self);
  1456. out_delete_line:
  1457. free(line);
  1458. out_failure:
  1459. return -1;
  1460. }
  1461. static int dso__load_vmlinux(struct dso *self, struct map *map,
  1462. const char *vmlinux, symbol_filter_t filter)
  1463. {
  1464. int err = -1, fd;
  1465. fd = open(vmlinux, O_RDONLY);
  1466. if (fd < 0)
  1467. return -1;
  1468. dso__set_loaded(self, map->type);
  1469. err = dso__load_sym(self, map, vmlinux, fd, filter, 0, 0);
  1470. close(fd);
  1471. if (err > 0)
  1472. pr_debug("Using %s for symbols\n", vmlinux);
  1473. return err;
  1474. }
int dso__load_vmlinux_path(struct dso *self, struct map *map,
			   symbol_filter_t filter)
{
	int i, err = 0;
	char *filename;

	pr_debug("Looking at the vmlinux_path (%d entries long)\n",
		 vmlinux_path__nr_entries + 1);

	filename = dso__build_id_filename(self, NULL, 0);
	if (filename != NULL) {
		err = dso__load_vmlinux(self, map, filename, filter);
		if (err > 0) {
			dso__set_long_name(self, filename);
			goto out;
		}
		free(filename);
	}

	for (i = 0; i < vmlinux_path__nr_entries; ++i) {
		err = dso__load_vmlinux(self, map, vmlinux_path[i], filter);
		if (err > 0) {
			dso__set_long_name(self, strdup(vmlinux_path[i]));
			break;
		}
	}
out:
	return err;
}

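/*
 * Search order used above: a build-id cache entry for this DSO is tried
 * first (dso__build_id_filename()), and only if that fails are the generic
 * vmlinux_path candidates set up by vmlinux_path__init() tried in order.
 * The "+ 1" in the pr_debug() counts that extra build-id candidate.
 */
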
static int dso__load_kernel_sym(struct dso *self, struct map *map,
				symbol_filter_t filter)
{
	int err;
	const char *kallsyms_filename = NULL;
	char *kallsyms_allocated_filename = NULL;
	/*
	 * Step 1: if the user specified a vmlinux filename, use it and only
	 * it, reporting errors to the user if it cannot be used.
	 *
	 * For instance, try to analyse an ARM perf.data file _without_ a
	 * build-id, or if the user specifies the wrong path to the right
	 * vmlinux file, obviously we can't fall back to another vmlinux (an
	 * x86_64 one, on the machine where analysis is being performed, say),
	 * or worse, /proc/kallsyms.
	 *
	 * If the specified file _has_ a build-id and there is a build-id
	 * section in the perf.data file, we will still do the expected
	 * validation in dso__load_vmlinux and will bail out if they don't
	 * match.
	 */
	if (symbol_conf.vmlinux_name != NULL) {
		err = dso__load_vmlinux(self, map,
					symbol_conf.vmlinux_name, filter);
		if (err > 0) {
			dso__set_long_name(self,
					   strdup(symbol_conf.vmlinux_name));
			goto out_fixup;
		}
		return err;
	}

	if (vmlinux_path != NULL) {
		err = dso__load_vmlinux_path(self, map, filter);
		if (err > 0)
			goto out_fixup;
	}

	/*
	 * Say the kernel DSO was created when processing the build-id header
	 * table: we have a build-id, so check if it is the same as the
	 * running kernel's, using it if it is.
	 */
	if (self->has_build_id) {
		u8 kallsyms_build_id[BUILD_ID_SIZE];
		char sbuild_id[BUILD_ID_SIZE * 2 + 1];

		if (sysfs__read_build_id("/sys/kernel/notes", kallsyms_build_id,
					 sizeof(kallsyms_build_id)) == 0) {
			if (dso__build_id_equal(self, kallsyms_build_id)) {
				kallsyms_filename = "/proc/kallsyms";
				goto do_kallsyms;
			}
		}
		/*
		 * Now look if we have it on the build-id cache in
		 * $HOME/.debug/[kernel.kallsyms].
		 */
		build_id__sprintf(self->build_id, sizeof(self->build_id),
				  sbuild_id);

		if (asprintf(&kallsyms_allocated_filename,
			     "%s/.debug/[kernel.kallsyms]/%s",
			     getenv("HOME"), sbuild_id) == -1) {
			pr_err("Not enough memory for kallsyms file lookup\n");
			return -1;
		}

		kallsyms_filename = kallsyms_allocated_filename;

		if (access(kallsyms_filename, F_OK)) {
			pr_err("No kallsyms or vmlinux with build-id %s "
			       "was found\n", sbuild_id);
			free(kallsyms_allocated_filename);
			return -1;
		}
	} else {
		/*
		 * Last resort, if we don't have a build-id and couldn't find
		 * any vmlinux file, try the running kernel kallsyms table.
		 */
		kallsyms_filename = "/proc/kallsyms";
	}

do_kallsyms:
	err = dso__load_kallsyms(self, kallsyms_filename, map, filter);
	if (err > 0)
		pr_debug("Using %s for symbols\n", kallsyms_filename);
	free(kallsyms_allocated_filename);

	if (err > 0) {
out_fixup:
		if (kallsyms_filename != NULL)
			dso__set_long_name(self, strdup("[kernel.kallsyms]"));
		map__fixup_start(map);
		map__fixup_end(map);
	}

	return err;
}

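/*
 * Summary of the fallback order implemented above for host kernel symbols:
 *   1. symbol_conf.vmlinux_name, if the user gave one (and only that);
 *   2. the vmlinux_path candidate list;
 *   3. if the DSO has a build-id: /proc/kallsyms when /sys/kernel/notes
 *      matches it, otherwise $HOME/.debug/[kernel.kallsyms]/<build-id>;
 *   4. with no build-id at all: /proc/kallsyms of the running kernel.
 */
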
static int dso__load_guest_kernel_sym(struct dso *self, struct map *map,
				      symbol_filter_t filter)
{
	int err;
	const char *kallsyms_filename = NULL;
	struct machine *machine;
	char path[PATH_MAX];

	if (!map->groups) {
		pr_debug("Guest kernel map has no map groups to point to\n");
		return -1;
	}
	machine = map->groups->machine;

	if (machine__is_default_guest(machine)) {
		/*
		 * If the user specified a vmlinux filename, use it and only
		 * it, reporting errors to the user if it cannot be used.
		 * Otherwise use the guest kallsyms file given on the
		 * command line.
		 */
		if (symbol_conf.default_guest_vmlinux_name != NULL) {
			err = dso__load_vmlinux(self, map,
				symbol_conf.default_guest_vmlinux_name, filter);
			goto out_try_fixup;
		}

		kallsyms_filename = symbol_conf.default_guest_kallsyms;
		if (!kallsyms_filename)
			return -1;
	} else {
		sprintf(path, "%s/proc/kallsyms", machine->root_dir);
		kallsyms_filename = path;
	}

	err = dso__load_kallsyms(self, kallsyms_filename, map, filter);
	if (err > 0)
		pr_debug("Using %s for symbols\n", kallsyms_filename);

out_try_fixup:
	if (err > 0) {
		if (kallsyms_filename != NULL) {
			machine__mmap_name(machine, path, sizeof(path));
			dso__set_long_name(self, strdup(path));
		}
		map__fixup_start(map);
		map__fixup_end(map);
	}

	return err;
}

static void dsos__add(struct list_head *head, struct dso *dso)
{
	list_add_tail(&dso->node, head);
}

static struct dso *dsos__find(struct list_head *head, const char *name)
{
	struct dso *pos;

	list_for_each_entry(pos, head, node)
		if (strcmp(pos->long_name, name) == 0)
			return pos;
	return NULL;
}

struct dso *__dsos__findnew(struct list_head *head, const char *name)
{
	struct dso *dso = dsos__find(head, name);

	if (!dso) {
		dso = dso__new(name);
		if (dso != NULL) {
			dsos__add(head, dso);
			dso__set_basename(dso);
		}
	}

	return dso;
}

size_t __dsos__fprintf(struct list_head *head, FILE *fp)
{
	struct dso *pos;
	size_t ret = 0;

	list_for_each_entry(pos, head, node) {
		int i;
		for (i = 0; i < MAP__NR_TYPES; ++i)
			ret += dso__fprintf(pos, i, fp);
	}

	return ret;
}

size_t machines__fprintf_dsos(struct rb_root *self, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = 0;

	for (nd = rb_first(self); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += __dsos__fprintf(&pos->kernel_dsos, fp);
		ret += __dsos__fprintf(&pos->user_dsos, fp);
	}

	return ret;
}

static size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp,
				      bool with_hits)
{
	struct dso *pos;
	size_t ret = 0;

	list_for_each_entry(pos, head, node) {
		if (with_hits && !pos->hit)
			continue;
		ret += dso__fprintf_buildid(pos, fp);
		ret += fprintf(fp, " %s\n", pos->long_name);
	}
	return ret;
}

size_t machine__fprintf_dsos_buildid(struct machine *self, FILE *fp, bool with_hits)
{
	return __dsos__fprintf_buildid(&self->kernel_dsos, fp, with_hits) +
	       __dsos__fprintf_buildid(&self->user_dsos, fp, with_hits);
}

size_t machines__fprintf_dsos_buildid(struct rb_root *self, FILE *fp, bool with_hits)
{
	struct rb_node *nd;
	size_t ret = 0;

	for (nd = rb_first(self); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += machine__fprintf_dsos_buildid(pos, fp, with_hits);
	}
	return ret;
}

struct dso *dso__new_kernel(const char *name)
{
	struct dso *self = dso__new(name ?: "[kernel.kallsyms]");

	if (self != NULL) {
		dso__set_short_name(self, "[kernel]");
		self->kernel = DSO_TYPE_KERNEL;
	}

	return self;
}

static struct dso *dso__new_guest_kernel(struct machine *machine,
					 const char *name)
{
	char bf[PATH_MAX];
	struct dso *self = dso__new(name ?: machine__mmap_name(machine, bf, sizeof(bf)));

	if (self != NULL) {
		dso__set_short_name(self, "[guest.kernel]");
		self->kernel = DSO_TYPE_GUEST_KERNEL;
	}

	return self;
}

void dso__read_running_kernel_build_id(struct dso *self, struct machine *machine)
{
	char path[PATH_MAX];

	if (machine__is_default_guest(machine))
		return;
	sprintf(path, "%s/sys/kernel/notes", machine->root_dir);
	if (sysfs__read_build_id(path, self->build_id,
				 sizeof(self->build_id)) == 0)
		self->has_build_id = true;
}

static struct dso *machine__create_kernel(struct machine *self)
{
	const char *vmlinux_name = NULL;
	struct dso *kernel;

	if (machine__is_host(self)) {
		vmlinux_name = symbol_conf.vmlinux_name;
		kernel = dso__new_kernel(vmlinux_name);
	} else {
		if (machine__is_default_guest(self))
			vmlinux_name = symbol_conf.default_guest_vmlinux_name;
		kernel = dso__new_guest_kernel(self, vmlinux_name);
	}

	if (kernel != NULL) {
		dso__read_running_kernel_build_id(kernel, self);
		dsos__add(&self->kernel_dsos, kernel);
	}
	return kernel;
}

int __machine__create_kernel_maps(struct machine *self, struct dso *kernel)
{
	enum map_type type;

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;

		self->vmlinux_maps[type] = map__new2(0, kernel, type);
		if (self->vmlinux_maps[type] == NULL)
			return -1;

		self->vmlinux_maps[type]->map_ip =
			self->vmlinux_maps[type]->unmap_ip = identity__map_ip;

		kmap = map__kmap(self->vmlinux_maps[type]);
		kmap->kmaps = &self->kmaps;
		map_groups__insert(&self->kmaps, self->vmlinux_maps[type]);
	}

	return 0;
}

void machine__destroy_kernel_maps(struct machine *self)
{
	enum map_type type;

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;

		if (self->vmlinux_maps[type] == NULL)
			continue;

		kmap = map__kmap(self->vmlinux_maps[type]);
		map_groups__remove(&self->kmaps, self->vmlinux_maps[type]);
		if (kmap->ref_reloc_sym) {
			/*
			 * ref_reloc_sym is shared among all maps, so free just
			 * on one of them.
			 */
			if (type == MAP__FUNCTION) {
				free((char *)kmap->ref_reloc_sym->name);
				kmap->ref_reloc_sym->name = NULL;
				free(kmap->ref_reloc_sym);
			}
			kmap->ref_reloc_sym = NULL;
		}

		map__delete(self->vmlinux_maps[type]);
		self->vmlinux_maps[type] = NULL;
	}
}

int machine__create_kernel_maps(struct machine *self)
{
	struct dso *kernel = machine__create_kernel(self);

	if (kernel == NULL ||
	    __machine__create_kernel_maps(self, kernel) < 0)
		return -1;

	if (symbol_conf.use_modules && machine__create_modules(self) < 0)
		pr_debug("Problems creating module maps, continuing anyway...\n");
	/*
	 * Now that we have all the maps created, just set the ->end of them:
	 */
	map_groups__fixup_end(&self->kmaps);
	return 0;
}

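/*
 * machine__create_kernel_maps() ties the pieces above together: it creates
 * the kernel DSO, builds one vmlinux map per map_type, optionally parses
 * /proc/modules into module maps, and finally lets map_groups__fixup_end()
 * derive each map's ->end from the start of the next map in the group.
 */
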
static void vmlinux_path__exit(void)
{
	while (--vmlinux_path__nr_entries >= 0) {
		free(vmlinux_path[vmlinux_path__nr_entries]);
		vmlinux_path[vmlinux_path__nr_entries] = NULL;
	}

	free(vmlinux_path);
	vmlinux_path = NULL;
}

static int vmlinux_path__init(void)
{
	struct utsname uts;
	char bf[PATH_MAX];

	if (uname(&uts) < 0)
		return -1;

	vmlinux_path = malloc(sizeof(char *) * 5);
	if (vmlinux_path == NULL)
		return -1;

	vmlinux_path[vmlinux_path__nr_entries] = strdup("vmlinux");
	if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
		goto out_fail;
	++vmlinux_path__nr_entries;
	vmlinux_path[vmlinux_path__nr_entries] = strdup("/boot/vmlinux");
	if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
		goto out_fail;
	++vmlinux_path__nr_entries;
	snprintf(bf, sizeof(bf), "/boot/vmlinux-%s", uts.release);
	vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
	if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
		goto out_fail;
	++vmlinux_path__nr_entries;
	snprintf(bf, sizeof(bf), "/lib/modules/%s/build/vmlinux", uts.release);
	vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
	if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
		goto out_fail;
	++vmlinux_path__nr_entries;
	snprintf(bf, sizeof(bf), "/usr/lib/debug/lib/modules/%s/vmlinux",
		 uts.release);
	vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
	if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
		goto out_fail;
	++vmlinux_path__nr_entries;

	return 0;

out_fail:
	vmlinux_path__exit();
	return -1;
}

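/*
 * The five candidate locations filled in above (matching the malloc of five
 * char pointers) are, in search order:
 *
 *	vmlinux
 *	/boot/vmlinux
 *	/boot/vmlinux-<uname -r>
 *	/lib/modules/<uname -r>/build/vmlinux
 *	/usr/lib/debug/lib/modules/<uname -r>/vmlinux
 */
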
size_t machine__fprintf_vmlinux_path(struct machine *self, FILE *fp)
{
	int i;
	size_t printed = 0;
	struct dso *kdso = self->vmlinux_maps[MAP__FUNCTION]->dso;

	if (kdso->has_build_id) {
		char filename[PATH_MAX];
		if (dso__build_id_filename(kdso, filename, sizeof(filename)))
			printed += fprintf(fp, "[0] %s\n", filename);
	}

	for (i = 0; i < vmlinux_path__nr_entries; ++i)
		printed += fprintf(fp, "[%d] %s\n",
				   i + kdso->has_build_id, vmlinux_path[i]);

	return printed;
}

static int setup_list(struct strlist **list, const char *list_str,
		      const char *list_name)
{
	if (list_str == NULL)
		return 0;

	*list = strlist__new(true, list_str);
	if (!*list) {
		pr_err("problems parsing %s list\n", list_name);
		return -1;
	}
	return 0;
}

int symbol__init(void)
{
	elf_version(EV_CURRENT);
	if (symbol_conf.sort_by_name)
		symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) -
					  sizeof(struct symbol));

	if (symbol_conf.try_vmlinux_path && vmlinux_path__init() < 0)
		return -1;

	if (symbol_conf.field_sep && *symbol_conf.field_sep == '.') {
		pr_err("'.' is the only invalid --field-separator argument\n");
		return -1;
	}

	if (setup_list(&symbol_conf.dso_list,
		       symbol_conf.dso_list_str, "dso") < 0)
		return -1;

	if (setup_list(&symbol_conf.comm_list,
		       symbol_conf.comm_list_str, "comm") < 0)
		goto out_free_dso_list;

	if (setup_list(&symbol_conf.sym_list,
		       symbol_conf.sym_list_str, "symbol") < 0)
		goto out_free_comm_list;

	return 0;

	/* Unwind in reverse order of setup so each failure path frees
	 * everything that was successfully created before it. */
out_free_comm_list:
	strlist__delete(symbol_conf.comm_list);
out_free_dso_list:
	strlist__delete(symbol_conf.dso_list);
	return -1;
}

void symbol__exit(void)
{
	strlist__delete(symbol_conf.sym_list);
	strlist__delete(symbol_conf.dso_list);
	strlist__delete(symbol_conf.comm_list);
	vmlinux_path__exit();
	symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL;
}

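/*
 * Minimal usage sketch (illustrative only, not part of this file): a tool
 * would typically configure symbol_conf, call symbol__init() once before
 * resolving any symbols, and pair it with symbol__exit() on shutdown.
 */
#if 0	/* example only */
	symbol_conf.try_vmlinux_path = true;
	symbol_conf.use_modules = true;
	if (symbol__init() < 0)
		return -1;
	/* ... create machines, load kernel maps, resolve samples ... */
	symbol__exit();
#endif
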
int machines__create_kernel_maps(struct rb_root *self, pid_t pid)
{
	struct machine *machine = machines__findnew(self, pid);

	if (machine == NULL)
		return -1;

	return machine__create_kernel_maps(machine);
}

static int hex(char ch)
{
	if ((ch >= '0') && (ch <= '9'))
		return ch - '0';
	if ((ch >= 'a') && (ch <= 'f'))
		return ch - 'a' + 10;
	if ((ch >= 'A') && (ch <= 'F'))
		return ch - 'A' + 10;
	return -1;
}

/*
 * While we find nice hex chars, build a long_val.
 * Return number of chars processed.
 */
int hex2u64(const char *ptr, u64 *long_val)
{
	const char *p = ptr;
	*long_val = 0;

	while (*p) {
		const int hex_val = hex(*p);

		if (hex_val < 0)
			break;

		*long_val = (*long_val << 4) | hex_val;
		p++;
	}

	return p - ptr;
}

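/*
 * Illustrative sketch (not part of the original file): hex2u64() stops at
 * the first non-hex character, so it can parse the address column of a
 * kallsyms-style line directly. The sample string below is made up.
 */
#if 0	/* example only */
	u64 addr;
	int n = hex2u64("ffffffff81000000 T _text", &addr);
	/* n == 16, addr == 0xffffffff81000000ULL */
#endif
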
char *strxfrchar(char *s, char from, char to)
{
	char *p = s;

	while ((p = strchr(p, from)) != NULL)
		*p++ = to;

	return s;
}

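/*
 * strxfrchar() rewrites its argument in place and returns it: applied to a
 * writable copy of "some-module" with from='-' and to='_' it yields
 * "some_module". Don't pass string literals to it.
 */
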
int machines__create_guest_kernel_maps(struct rb_root *self)
{
	int ret = 0;
	struct dirent **namelist = NULL;
	int i, items = 0;
	char path[PATH_MAX];
	pid_t pid;

	if (symbol_conf.default_guest_vmlinux_name ||
	    symbol_conf.default_guest_modules ||
	    symbol_conf.default_guest_kallsyms) {
		machines__create_kernel_maps(self, DEFAULT_GUEST_KERNEL_ID);
	}

	if (symbol_conf.guestmount) {
		items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
		if (items <= 0)
			return -ENOENT;
		for (i = 0; i < items; i++) {
			if (!isdigit(namelist[i]->d_name[0])) {
				/* Filter out . and .. */
				continue;
			}
			pid = atoi(namelist[i]->d_name);
			sprintf(path, "%s/%s/proc/kallsyms",
				symbol_conf.guestmount,
				namelist[i]->d_name);
			ret = access(path, R_OK);
			if (ret) {
				pr_debug("Can't access file %s\n", path);
				goto failure;
			}
			machines__create_kernel_maps(self, pid);
		}
failure:
		free(namelist);
	}

	return ret;
}

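/*
 * The layout expected under symbol_conf.guestmount is one directory per
 * guest, named after the guest's PID and laid out like a root filesystem,
 * so that <guestmount>/<pid>/proc/kallsyms exists for every guest whose
 * kernel symbols should be resolved.
 */
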
void machines__destroy_guest_kernel_maps(struct rb_root *self)
{
	struct rb_node *next = rb_first(self);

	while (next) {
		struct machine *pos = rb_entry(next, struct machine, rb_node);

		next = rb_next(&pos->rb_node);
		rb_erase(&pos->rb_node, self);
		machine__delete(pos);
	}
}

int machine__load_kallsyms(struct machine *self, const char *filename,
			   enum map_type type, symbol_filter_t filter)
{
	struct map *map = self->vmlinux_maps[type];
	int ret = dso__load_kallsyms(map->dso, filename, map, filter);

	if (ret > 0) {
		dso__set_loaded(map->dso, type);
		/*
		 * Since /proc/kallsyms will have multiple sections for the
		 * kernel, with modules between them, fixup the end of all
		 * sections.
		 */
		__map_groups__fixup_end(&self->kmaps, type);
	}

	return ret;
}

int machine__load_vmlinux_path(struct machine *self, enum map_type type,
			       symbol_filter_t filter)
{
	struct map *map = self->vmlinux_maps[type];
	int ret = dso__load_vmlinux_path(map->dso, map, filter);

	if (ret > 0) {
		dso__set_loaded(map->dso, type);
		map__reloc_vmlinux(map);
	}

	return ret;
}