header.c

#define _FILE_OFFSET_BITS 64

#include <sys/types.h>
#include <byteswap.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <linux/list.h>
#include <linux/kernel.h>

#include "evlist.h"
#include "evsel.h"
#include "util.h"
#include "header.h"
#include "../perf.h"
#include "trace-event.h"
#include "session.h"
#include "symbol.h"
#include "debug.h"

static bool no_buildid_cache = false;

static int event_count;
static struct perf_trace_event_type *events;
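
/*
 * Table of user supplied event types (id <-> name), grown one entry at a
 * time by perf_header__push_event() and searched by perf_header__find_event().
 */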
int perf_header__push_event(u64 id, const char *name)
{
	if (strlen(name) > MAX_EVENT_NAME)
		pr_warning("Event %s will be truncated\n", name);

	if (!events) {
		events = malloc(sizeof(struct perf_trace_event_type));
		if (events == NULL)
			return -ENOMEM;
	} else {
		struct perf_trace_event_type *nevents;

		nevents = realloc(events, (event_count + 1) * sizeof(*events));
		if (nevents == NULL)
			return -ENOMEM;
		events = nevents;
	}
	memset(&events[event_count], 0, sizeof(struct perf_trace_event_type));
	events[event_count].event_id = id;
	strncpy(events[event_count].name, name, MAX_EVENT_NAME - 1);
	event_count++;
	return 0;
}

char *perf_header__find_event(u64 id)
{
	int i;

	for (i = 0 ; i < event_count; i++) {
		if (events[i].event_id == id)
			return events[i].name;
	}
	return NULL;
}

static const char *__perf_magic = "PERFFILE";

#define PERF_MAGIC	(*(u64 *)__perf_magic)

struct perf_file_attr {
	struct perf_event_attr	attr;
	struct perf_file_section	ids;
};

void perf_header__set_feat(struct perf_header *header, int feat)
{
	set_bit(feat, header->adds_features);
}

void perf_header__clear_feat(struct perf_header *header, int feat)
{
	clear_bit(feat, header->adds_features);
}

bool perf_header__has_feat(const struct perf_header *header, int feat)
{
	return test_bit(feat, header->adds_features);
}
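
/*
 * do_write() keeps writing until the whole buffer has been pushed out,
 * returning -errno on failure; write_padded() follows the payload with
 * zero bytes up to the requested aligned length.
 */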
static int do_write(int fd, const void *buf, size_t size)
{
	while (size) {
		int ret = write(fd, buf, size);

		if (ret < 0)
			return -errno;

		size -= ret;
		buf += ret;
	}

	return 0;
}

#define NAME_ALIGN 64

static int write_padded(int fd, const void *bf, size_t count,
			size_t count_aligned)
{
	static const char zero_buf[NAME_ALIGN];
	int err = do_write(fd, bf, count);

	if (!err)
		err = do_write(fd, zero_buf, count_aligned - count);

	return err;
}

#define dsos__for_each_with_build_id(pos, head)	\
	list_for_each_entry(pos, head, node)		\
		if (!pos->has_build_id)			\
			continue;			\
		else
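
/*
 * Emit one build_id_event per DSO that was actually hit, each followed by
 * its long name padded to NAME_ALIGN, for the host and then every guest
 * machine in the session.
 */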
static int __dsos__write_buildid_table(struct list_head *head, pid_t pid,
				       u16 misc, int fd)
{
	struct dso *pos;

	dsos__for_each_with_build_id(pos, head) {
		int err;
		struct build_id_event b;
		size_t len;

		if (!pos->hit)
			continue;
		len = pos->long_name_len + 1;
		len = ALIGN(len, NAME_ALIGN);
		memset(&b, 0, sizeof(b));
		memcpy(&b.build_id, pos->build_id, sizeof(pos->build_id));
		b.pid = pid;
		b.header.misc = misc;
		b.header.size = sizeof(b) + len;
		err = do_write(fd, &b, sizeof(b));
		if (err < 0)
			return err;
		err = write_padded(fd, pos->long_name,
				   pos->long_name_len + 1, len);
		if (err < 0)
			return err;
	}

	return 0;
}

static int machine__write_buildid_table(struct machine *machine, int fd)
{
	int err;
	u16 kmisc = PERF_RECORD_MISC_KERNEL,
	    umisc = PERF_RECORD_MISC_USER;

	if (!machine__is_host(machine)) {
		kmisc = PERF_RECORD_MISC_GUEST_KERNEL;
		umisc = PERF_RECORD_MISC_GUEST_USER;
	}

	err = __dsos__write_buildid_table(&machine->kernel_dsos, machine->pid,
					  kmisc, fd);
	if (err == 0)
		err = __dsos__write_buildid_table(&machine->user_dsos,
						  machine->pid, umisc, fd);
	return err;
}

static int dsos__write_buildid_table(struct perf_header *header, int fd)
{
	struct perf_session *session = container_of(header,
			struct perf_session, header);
	struct rb_node *nd;
	int err = machine__write_buildid_table(&session->host_machine, fd);

	if (err)
		return err;

	for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		err = machine__write_buildid_table(pos, fd);
		if (err)
			break;
	}
	return err;
}
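
/*
 * Build-id cache: the DSO is stored (hard linked when possible, copied
 * otherwise) as <debugdir>/<resolved name>/<sbuild_id>, and a relative
 * symlink to it is created at <debugdir>/.build-id/<first 2 chars>/<rest>.
 */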
int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
			  const char *name, bool is_kallsyms)
{
	const size_t size = PATH_MAX;
	char *realname = realpath(name, NULL),
	     *filename = malloc(size),
	     *linkname = malloc(size), *targetname;
	int len, err = -1;

	if (realname == NULL || filename == NULL || linkname == NULL)
		goto out_free;

	len = snprintf(filename, size, "%s%s%s",
		       debugdir, is_kallsyms ? "/" : "", realname);

	if (mkdir_p(filename, 0755))
		goto out_free;

	snprintf(filename + len, size - len, "/%s", sbuild_id);

	if (access(filename, F_OK)) {
		if (is_kallsyms) {
			if (copyfile("/proc/kallsyms", filename))
				goto out_free;
		} else if (link(realname, filename) && copyfile(name, filename))
			goto out_free;
	}

	len = snprintf(linkname, size, "%s/.build-id/%.2s",
		       debugdir, sbuild_id);

	if (access(linkname, X_OK) && mkdir_p(linkname, 0755))
		goto out_free;

	snprintf(linkname + len, size - len, "/%s", sbuild_id + 2);
	targetname = filename + strlen(debugdir) - 5;
	memcpy(targetname, "../..", 5);

	if (symlink(targetname, linkname) == 0)
		err = 0;
out_free:
	free(realname);
	free(filename);
	free(linkname);
	return err;
}

static int build_id_cache__add_b(const u8 *build_id, size_t build_id_size,
				 const char *name, const char *debugdir,
				 bool is_kallsyms)
{
	char sbuild_id[BUILD_ID_SIZE * 2 + 1];

	build_id__sprintf(build_id, build_id_size, sbuild_id);

	return build_id_cache__add_s(sbuild_id, debugdir, name, is_kallsyms);
}

int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir)
{
	const size_t size = PATH_MAX;
	char *filename = malloc(size),
	     *linkname = malloc(size);
	int err = -1;

	if (filename == NULL || linkname == NULL)
		goto out_free;

	snprintf(linkname, size, "%s/.build-id/%.2s/%s",
		 debugdir, sbuild_id, sbuild_id + 2);

	if (access(linkname, F_OK))
		goto out_free;

	if (readlink(linkname, filename, size) < 0)
		goto out_free;

	if (unlink(linkname))
		goto out_free;

	/*
	 * Since the link is relative, we must make it absolute:
	 */
	snprintf(linkname, size, "%s/.build-id/%.2s/%s",
		 debugdir, sbuild_id, filename);

	if (unlink(linkname))
		goto out_free;

	err = 0;
out_free:
	free(filename);
	free(linkname);
	return err;
}
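
/*
 * Kernel DSOs whose long name is not an absolute path (e.g. "[kernel.kallsyms]")
 * are cached from /proc/kallsyms rather than from a file on disk.
 */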
static int dso__cache_build_id(struct dso *dso, const char *debugdir)
{
	bool is_kallsyms = dso->kernel && dso->long_name[0] != '/';

	return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id),
				     dso->long_name, debugdir, is_kallsyms);
}

static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir)
{
	struct dso *pos;
	int err = 0;

	dsos__for_each_with_build_id(pos, head)
		if (dso__cache_build_id(pos, debugdir))
			err = -1;

	return err;
}

static int machine__cache_build_ids(struct machine *machine, const char *debugdir)
{
	int ret = __dsos__cache_build_ids(&machine->kernel_dsos, debugdir);
	ret |= __dsos__cache_build_ids(&machine->user_dsos, debugdir);
	return ret;
}

static int perf_session__cache_build_ids(struct perf_session *session)
{
	struct rb_node *nd;
	int ret;
	char debugdir[PATH_MAX];

	snprintf(debugdir, sizeof(debugdir), "%s", buildid_dir);

	if (mkdir(debugdir, 0755) != 0 && errno != EEXIST)
		return -1;

	ret = machine__cache_build_ids(&session->host_machine, debugdir);

	for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret |= machine__cache_build_ids(pos, debugdir);
	}
	return ret ? -1 : 0;
}

static bool machine__read_build_ids(struct machine *machine, bool with_hits)
{
	bool ret = __dsos__read_build_ids(&machine->kernel_dsos, with_hits);

	ret |= __dsos__read_build_ids(&machine->user_dsos, with_hits);
	return ret;
}

static bool perf_session__read_build_ids(struct perf_session *session, bool with_hits)
{
	struct rb_node *nd;
	bool ret = machine__read_build_ids(&session->host_machine, with_hits);

	for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret |= machine__read_build_ids(pos, with_hits);
	}

	return ret;
}
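
/*
 * The table of perf_file_section descriptors lives right after the data
 * area; the payloads (trace info, build-id table) follow it, so skip past
 * the table, write the payloads, then seek back and fill the table in.
 */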
static int perf_header__adds_write(struct perf_header *header,
				   struct perf_evlist *evlist, int fd)
{
	int nr_sections;
	struct perf_session *session;
	struct perf_file_section *feat_sec;
	int sec_size;
	u64 sec_start;
	int idx = 0, err;

	session = container_of(header, struct perf_session, header);

	if (perf_header__has_feat(header, HEADER_BUILD_ID) &&
	    !perf_session__read_build_ids(session, true))
		perf_header__clear_feat(header, HEADER_BUILD_ID);

	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
	if (!nr_sections)
		return 0;

	feat_sec = calloc(sizeof(*feat_sec), nr_sections);
	if (feat_sec == NULL)
		return -ENOMEM;

	sec_size = sizeof(*feat_sec) * nr_sections;

	sec_start = header->data_offset + header->data_size;
	lseek(fd, sec_start + sec_size, SEEK_SET);

	if (perf_header__has_feat(header, HEADER_TRACE_INFO)) {
		struct perf_file_section *trace_sec;

		trace_sec = &feat_sec[idx++];

		/* Write trace info */
		trace_sec->offset = lseek(fd, 0, SEEK_CUR);
		read_tracing_data(fd, &evlist->entries);
		trace_sec->size = lseek(fd, 0, SEEK_CUR) - trace_sec->offset;
	}

	if (perf_header__has_feat(header, HEADER_BUILD_ID)) {
		struct perf_file_section *buildid_sec;

		buildid_sec = &feat_sec[idx++];

		/* Write build-ids */
		buildid_sec->offset = lseek(fd, 0, SEEK_CUR);
		err = dsos__write_buildid_table(header, fd);
		if (err < 0) {
			pr_debug("failed to write buildid table\n");
			goto out_free;
		}
		buildid_sec->size = lseek(fd, 0, SEEK_CUR) -
					  buildid_sec->offset;
		if (!no_buildid_cache)
			perf_session__cache_build_ids(session);
	}

	lseek(fd, sec_start, SEEK_SET);
	err = do_write(fd, feat_sec, sec_size);
	if (err < 0)
		pr_debug("failed to write feature section\n");
out_free:
	free(feat_sec);
	return err;
}

int perf_header__write_pipe(int fd)
{
	struct perf_pipe_file_header f_header;
	int err;

	f_header = (struct perf_pipe_file_header){
		.magic	   = PERF_MAGIC,
		.size	   = sizeof(f_header),
	};

	err = do_write(fd, &f_header, sizeof(f_header));
	if (err < 0) {
		pr_debug("failed to write perf pipe header\n");
		return err;
	}

	return 0;
}
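
/*
 * Layout written here: the perf_file_header at offset 0, the per-event
 * sample id arrays, the perf_file_attr table, the user event type table,
 * and finally data_offset is recorded for the sample data that follows;
 * when at_exit is set the feature sections are appended via
 * perf_header__adds_write().
 */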
int perf_session__write_header(struct perf_session *session,
			       struct perf_evlist *evlist,
			       int fd, bool at_exit)
{
	struct perf_file_header f_header;
	struct perf_file_attr   f_attr;
	struct perf_header *header = &session->header;
	struct perf_evsel *attr, *pair = NULL;
	int err;

	lseek(fd, sizeof(f_header), SEEK_SET);

	if (session->evlist != evlist)
		pair = list_entry(session->evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry(attr, &evlist->entries, node) {
		attr->id_offset = lseek(fd, 0, SEEK_CUR);
		err = do_write(fd, attr->id, attr->ids * sizeof(u64));
		if (err < 0) {
out_err_write:
			pr_debug("failed to write perf header\n");
			return err;
		}
		if (session->evlist != evlist) {
			err = do_write(fd, pair->id, pair->ids * sizeof(u64));
			if (err < 0)
				goto out_err_write;
			attr->ids += pair->ids;
			pair = list_entry(pair->node.next, struct perf_evsel, node);
		}
	}

	header->attr_offset = lseek(fd, 0, SEEK_CUR);

	list_for_each_entry(attr, &evlist->entries, node) {
		f_attr = (struct perf_file_attr){
			.attr = attr->attr,
			.ids  = {
				.offset = attr->id_offset,
				.size   = attr->ids * sizeof(u64),
			}
		};
		err = do_write(fd, &f_attr, sizeof(f_attr));
		if (err < 0) {
			pr_debug("failed to write perf header attribute\n");
			return err;
		}
	}

	header->event_offset = lseek(fd, 0, SEEK_CUR);
	header->event_size = event_count * sizeof(struct perf_trace_event_type);
	if (events) {
		err = do_write(fd, events, header->event_size);
		if (err < 0) {
			pr_debug("failed to write perf header events\n");
			return err;
		}
	}

	header->data_offset = lseek(fd, 0, SEEK_CUR);

	if (at_exit) {
		err = perf_header__adds_write(header, evlist, fd);
		if (err < 0)
			return err;
	}

	f_header = (struct perf_file_header){
		.magic	   = PERF_MAGIC,
		.size	   = sizeof(f_header),
		.attr_size = sizeof(f_attr),
		.attrs = {
			.offset = header->attr_offset,
			.size   = evlist->nr_entries * sizeof(f_attr),
		},
		.data = {
			.offset = header->data_offset,
			.size	= header->data_size,
		},
		.event_types = {
			.offset = header->event_offset,
			.size	= header->event_size,
		},
	};

	memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));

	lseek(fd, 0, SEEK_SET);
	err = do_write(fd, &f_header, sizeof(f_header));
	if (err < 0) {
		pr_debug("failed to write perf header\n");
		return err;
	}
	lseek(fd, header->data_offset + header->data_size, SEEK_SET);

	header->frozen = 1;
	return 0;
}

static int perf_header__getbuffer64(struct perf_header *header,
				    int fd, void *buf, size_t size)
{
	if (readn(fd, buf, size) <= 0)
		return -1;

	if (header->needs_swap)
		mem_bswap_64(buf, size);

	return 0;
}

int perf_header__process_sections(struct perf_header *header, int fd,
				  int (*process)(struct perf_file_section *section,
						 struct perf_header *ph,
						 int feat, int fd))
{
	struct perf_file_section *feat_sec;
	int nr_sections;
	int sec_size;
	int idx = 0;
	int err = -1, feat = 1;

	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
	if (!nr_sections)
		return 0;

	feat_sec = calloc(sizeof(*feat_sec), nr_sections);
	if (!feat_sec)
		return -1;

	sec_size = sizeof(*feat_sec) * nr_sections;

	lseek(fd, header->data_offset + header->data_size, SEEK_SET);

	if (perf_header__getbuffer64(header, fd, feat_sec, sec_size))
		goto out_free;

	err = 0;
	while (idx < nr_sections && feat < HEADER_LAST_FEATURE) {
		if (perf_header__has_feat(header, feat)) {
			struct perf_file_section *sec = &feat_sec[idx++];

			err = process(sec, header, feat, fd);
			if (err < 0)
				break;
		}
		++feat;
	}
out_free:
	free(feat_sec);
	return err;
}
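
/*
 * Validate the on-disk magic and detect a cross-endian perf.data file: if
 * attr_size only matches after byte swapping, flag the header so every
 * subsequent read gets swapped.
 */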
int perf_file_header__read(struct perf_file_header *header,
			   struct perf_header *ph, int fd)
{
	lseek(fd, 0, SEEK_SET);

	if (readn(fd, header, sizeof(*header)) <= 0 ||
	    memcmp(&header->magic, __perf_magic, sizeof(header->magic)))
		return -1;

	if (header->attr_size != sizeof(struct perf_file_attr)) {
		u64 attr_size = bswap_64(header->attr_size);

		if (attr_size != sizeof(struct perf_file_attr))
			return -1;

		mem_bswap_64(header, offsetof(struct perf_file_header,
					      adds_features));
		ph->needs_swap = true;
	}

	if (header->size != sizeof(*header)) {
		/* Support the previous format */
		if (header->size == offsetof(typeof(*header), adds_features))
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
		else
			return -1;
	}

	memcpy(&ph->adds_features, &header->adds_features,
	       sizeof(ph->adds_features));
	/*
	 * FIXME: hack that assumes that if we need swap the perf.data file
	 * may be coming from an arch with a different word-size, ergo different
	 * DEFINE_BITMAP format, investigate more later, but for now its mostly
	 * safe to assume that we have a build-id section. Trace files probably
	 * have several other issues in this realm anyway...
	 */
	if (ph->needs_swap) {
		memset(&ph->adds_features, 0, sizeof(ph->adds_features));
		perf_header__set_feat(ph, HEADER_BUILD_ID);
	}

	ph->event_offset = header->event_types.offset;
	ph->event_size	 = header->event_types.size;
	ph->data_offset	 = header->data.offset;
	ph->data_size	 = header->data.size;
	return 0;
}
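
/*
 * Attach the build-id carried by a build_id_event to the matching DSO of
 * the right machine (host or guest) and list (kernel or user), creating
 * the DSO entry if it is not known yet.
 */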
static int __event_process_build_id(struct build_id_event *bev,
				    char *filename,
				    struct perf_session *session)
{
	int err = -1;
	struct list_head *head;
	struct machine *machine;
	u16 misc;
	struct dso *dso;
	enum dso_kernel_type dso_type;

	machine = perf_session__findnew_machine(session, bev->pid);
	if (!machine)
		goto out;

	misc = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	switch (misc) {
	case PERF_RECORD_MISC_KERNEL:
		dso_type = DSO_TYPE_KERNEL;
		head = &machine->kernel_dsos;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		dso_type = DSO_TYPE_GUEST_KERNEL;
		head = &machine->kernel_dsos;
		break;
	case PERF_RECORD_MISC_USER:
	case PERF_RECORD_MISC_GUEST_USER:
		dso_type = DSO_TYPE_USER;
		head = &machine->user_dsos;
		break;
	default:
		goto out;
	}

	dso = __dsos__findnew(head, filename);
	if (dso != NULL) {
		char sbuild_id[BUILD_ID_SIZE * 2 + 1];

		dso__set_build_id(dso, &bev->build_id);

		if (filename[0] == '[')
			dso->kernel = dso_type;

		build_id__sprintf(dso->build_id, sizeof(dso->build_id),
				  sbuild_id);
		pr_debug("build id event received for %s: %s\n",
			 dso->long_name, sbuild_id);
	}

	err = 0;
out:
	return err;
}

static int perf_header__read_build_ids(struct perf_header *header,
				       int input, u64 offset, u64 size)
{
	struct perf_session *session = container_of(header, struct perf_session, header);
	struct build_id_event bev;
	char filename[PATH_MAX];
	u64 limit = offset + size;
	int err = -1;

	while (offset < limit) {
		ssize_t len;

		if (read(input, &bev, sizeof(bev)) != sizeof(bev))
			goto out;

		if (header->needs_swap)
			perf_event_header__bswap(&bev.header);

		len = bev.header.size - sizeof(bev);
		if (read(input, filename, len) != len)
			goto out;

		__event_process_build_id(&bev, filename, session);

		offset += bev.header.size;
	}
	err = 0;
out:
	return err;
}
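
/*
 * Callback for perf_header__process_sections(): seek to each feature
 * section and handle the ones this code understands (trace info,
 * build-ids), skipping unknown features.
 */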
static int perf_file_section__process(struct perf_file_section *section,
				      struct perf_header *ph,
				      int feat, int fd)
{
	if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
		pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
			 "%d, continuing...\n", section->offset, feat);
		return 0;
	}

	switch (feat) {
	case HEADER_TRACE_INFO:
		trace_report(fd, false);
		break;

	case HEADER_BUILD_ID:
		if (perf_header__read_build_ids(ph, fd, section->offset, section->size))
			pr_debug("Failed to read buildids, continuing...\n");
		break;
	default:
		pr_debug("unknown feature %d, continuing...\n", feat);
	}

	return 0;
}

static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
				       struct perf_header *ph, int fd,
				       bool repipe)
{
	if (readn(fd, header, sizeof(*header)) <= 0 ||
	    memcmp(&header->magic, __perf_magic, sizeof(header->magic)))
		return -1;

	if (repipe && do_write(STDOUT_FILENO, header, sizeof(*header)) < 0)
		return -1;

	if (header->size != sizeof(*header)) {
		u64 size = bswap_64(header->size);

		if (size != sizeof(*header))
			return -1;

		ph->needs_swap = true;
	}

	return 0;
}

static int perf_header__read_pipe(struct perf_session *session, int fd)
{
	struct perf_header *header = &session->header;
	struct perf_pipe_file_header f_header;

	if (perf_file_header__read_pipe(&f_header, header, fd,
					session->repipe) < 0) {
		pr_debug("incompatible file format\n");
		return -EINVAL;
	}

	session->fd = fd;

	return 0;
}
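
/*
 * Read a perf.data header and rebuild the in-memory state: one perf_evsel
 * per file attribute with its sample ids, the user event type table and
 * the optional feature sections; pipe mode files only carry the small
 * pipe header.
 */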
int perf_session__read_header(struct perf_session *session, int fd)
{
	struct perf_header *header = &session->header;
	struct perf_file_header	f_header;
	struct perf_file_attr	f_attr;
	u64			f_id;
	int nr_attrs, nr_ids, i, j;

	session->evlist = perf_evlist__new(NULL, NULL);
	if (session->evlist == NULL)
		return -ENOMEM;

	if (session->fd_pipe)
		return perf_header__read_pipe(session, fd);

	if (perf_file_header__read(&f_header, header, fd) < 0) {
		pr_debug("incompatible file format\n");
		return -EINVAL;
	}

	nr_attrs = f_header.attrs.size / sizeof(f_attr);
	lseek(fd, f_header.attrs.offset, SEEK_SET);

	for (i = 0; i < nr_attrs; i++) {
		struct perf_evsel *evsel;
		off_t tmp;

		if (perf_header__getbuffer64(header, fd, &f_attr, sizeof(f_attr)))
			goto out_errno;

		tmp = lseek(fd, 0, SEEK_CUR);
		evsel = perf_evsel__new(&f_attr.attr, i);

		if (evsel == NULL)
			goto out_delete_evlist;
		/*
		 * Do it before so that if perf_evsel__alloc_id fails, this
		 * entry gets purged too at perf_evlist__delete().
		 */
		perf_evlist__add(session->evlist, evsel);

		nr_ids = f_attr.ids.size / sizeof(u64);
		/*
		 * We don't have the cpu and thread maps on the header, so
		 * for allocating the perf_sample_id table we fake 1 cpu and
		 * hattr->ids threads.
		 */
		if (perf_evsel__alloc_id(evsel, 1, nr_ids))
			goto out_delete_evlist;

		lseek(fd, f_attr.ids.offset, SEEK_SET);

		for (j = 0; j < nr_ids; j++) {
			if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
				goto out_errno;

			perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);
		}

		lseek(fd, tmp, SEEK_SET);
	}

	if (f_header.event_types.size) {
		lseek(fd, f_header.event_types.offset, SEEK_SET);
		events = malloc(f_header.event_types.size);
		if (events == NULL)
			return -ENOMEM;
		if (perf_header__getbuffer64(header, fd, events,
					     f_header.event_types.size))
			goto out_errno;
		event_count = f_header.event_types.size / sizeof(struct perf_trace_event_type);
	}

	perf_header__process_sections(header, fd, perf_file_section__process);

	lseek(fd, header->data_offset, SEEK_SET);

	header->frozen = 1;
	return 0;
out_errno:
	return -errno;

out_delete_evlist:
	perf_evlist__delete(session->evlist);
	session->evlist = NULL;
	return -ENOMEM;
}
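
/*
 * All events in an evlist are expected to agree on sample_type and
 * sample_id_all; these helpers return the common value and die() on a
 * mismatch.
 */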
u64 perf_evlist__sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *pos;
	u64 type = 0;

	list_for_each_entry(pos, &evlist->entries, node) {
		if (!type)
			type = pos->attr.sample_type;
		else if (type != pos->attr.sample_type)
			die("non matching sample_type");
	}

	return type;
}

bool perf_evlist__sample_id_all(const struct perf_evlist *evlist)
{
	bool value = false, first = true;
	struct perf_evsel *pos;

	list_for_each_entry(pos, &evlist->entries, node) {
		if (first) {
			value = pos->attr.sample_id_all;
			first = false;
		} else if (value != pos->attr.sample_id_all)
			die("non matching sample_id_all");
	}

	return value;
}
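
/*
 * When the output is a pipe the header cannot be rewritten at exit, so the
 * metadata is streamed as synthetic PERF_RECORD_HEADER_ATTR events carrying
 * each perf_event_attr plus its sample ids; the process_*() counterparts
 * rebuild the evlist on the read side.
 */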
int perf_event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id,
				perf_event__handler_t process,
				struct perf_session *session)
{
	union perf_event *ev;
	size_t size;
	int err;

	size = sizeof(struct perf_event_attr);
	size = ALIGN(size, sizeof(u64));
	size += sizeof(struct perf_event_header);
	size += ids * sizeof(u64);

	ev = malloc(size);

	if (ev == NULL)
		return -ENOMEM;

	ev->attr.attr = *attr;
	memcpy(ev->attr.id, id, ids * sizeof(u64));

	ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
	ev->attr.header.size = size;

	err = process(ev, NULL, session);

	free(ev);

	return err;
}

int perf_session__synthesize_attrs(struct perf_session *session,
				   perf_event__handler_t process)
{
	struct perf_evsel *attr;
	int err = 0;

	list_for_each_entry(attr, &session->evlist->entries, node) {
		err = perf_event__synthesize_attr(&attr->attr, attr->ids,
						  attr->id, process, session);
		if (err) {
			pr_debug("failed to create perf header attribute\n");
			return err;
		}
	}

	return err;
}

int perf_event__process_attr(union perf_event *event,
			     struct perf_session *session)
{
	unsigned int i, ids, n_ids;
	struct perf_evsel *evsel;

	if (session->evlist == NULL) {
		session->evlist = perf_evlist__new(NULL, NULL);
		if (session->evlist == NULL)
			return -ENOMEM;
	}

	evsel = perf_evsel__new(&event->attr.attr,
				session->evlist->nr_entries);
	if (evsel == NULL)
		return -ENOMEM;

	perf_evlist__add(session->evlist, evsel);

	ids = event->header.size;
	ids -= (void *)&event->attr.id - (void *)event;
	n_ids = ids / sizeof(u64);
	/*
	 * We don't have the cpu and thread maps on the header, so
	 * for allocating the perf_sample_id table we fake 1 cpu and
	 * hattr->ids threads.
	 */
	if (perf_evsel__alloc_id(evsel, 1, n_ids))
		return -ENOMEM;

	for (i = 0; i < n_ids; i++) {
		perf_evlist__id_add(session->evlist, evsel, 0, i,
				    event->attr.id[i]);
	}

	perf_session__update_sample_type(session);

	return 0;
}

int perf_event__synthesize_event_type(u64 event_id, char *name,
				      perf_event__handler_t process,
				      struct perf_session *session)
{
	union perf_event ev;
	size_t size = 0;
	int err = 0;

	memset(&ev, 0, sizeof(ev));

	ev.event_type.event_type.event_id = event_id;
	memset(ev.event_type.event_type.name, 0, MAX_EVENT_NAME);
	strncpy(ev.event_type.event_type.name, name, MAX_EVENT_NAME - 1);

	ev.event_type.header.type = PERF_RECORD_HEADER_EVENT_TYPE;
	size = strlen(name);
	size = ALIGN(size, sizeof(u64));
	ev.event_type.header.size = sizeof(ev.event_type) -
		(sizeof(ev.event_type.event_type.name) - size);

	err = process(&ev, NULL, session);

	return err;
}

int perf_event__synthesize_event_types(perf_event__handler_t process,
				       struct perf_session *session)
{
	struct perf_trace_event_type *type;
	int i, err = 0;

	for (i = 0; i < event_count; i++) {
		type = &events[i];

		err = perf_event__synthesize_event_type(type->event_id,
							type->name, process,
							session);
		if (err) {
			pr_debug("failed to create perf header event type\n");
			return err;
		}
	}

	return err;
}

int perf_event__process_event_type(union perf_event *event,
				   struct perf_session *session __unused)
{
	if (perf_header__push_event(event->event_type.event_type.event_id,
				    event->event_type.event_type.name) < 0)
		return -ENOMEM;

	return 0;
}
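
/*
 * Tracing data (tracepoint format descriptions) travels as a
 * PERF_RECORD_HEADER_TRACING_DATA event with its payload padded to a u64
 * boundary; the reader hands it to trace_report() and consumes the padding,
 * re-emitting it when repiping.
 */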
int perf_event__synthesize_tracing_data(int fd, struct perf_evlist *evlist,
					perf_event__handler_t process,
					struct perf_session *session __unused)
{
	union perf_event ev;
	ssize_t size = 0, aligned_size = 0, padding;
	int err __used = 0;

	memset(&ev, 0, sizeof(ev));

	ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
	size = read_tracing_data_size(fd, &evlist->entries);
	if (size <= 0)
		return size;
	aligned_size = ALIGN(size, sizeof(u64));
	padding = aligned_size - size;
	ev.tracing_data.header.size = sizeof(ev.tracing_data);
	ev.tracing_data.size = aligned_size;

	process(&ev, NULL, session);

	err = read_tracing_data(fd, &evlist->entries);
	write_padded(fd, NULL, 0, padding);

	return aligned_size;
}

int perf_event__process_tracing_data(union perf_event *event,
				     struct perf_session *session)
{
	ssize_t size_read, padding, size = event->tracing_data.size;
	off_t offset = lseek(session->fd, 0, SEEK_CUR);
	char buf[BUFSIZ];

	/* setup for reading amidst mmap */
	lseek(session->fd, offset + sizeof(struct tracing_data_event),
	      SEEK_SET);

	size_read = trace_report(session->fd, session->repipe);

	padding = ALIGN(size_read, sizeof(u64)) - size_read;

	if (read(session->fd, buf, padding) < 0)
		die("reading input file");
	if (session->repipe) {
		int retw = write(STDOUT_FILENO, buf, padding);
		if (retw <= 0 || retw != padding)
			die("repiping tracing data padding");
	}

	if (size_read + padding != size)
		die("tracing data size mismatch");

	return size_read + padding;
}
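
/*
 * Build-ids are streamed the same way: one PERF_RECORD_HEADER_BUILD_ID
 * event per hit DSO, with the filename padded to NAME_ALIGN, handled on
 * the read side by __event_process_build_id().
 */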
int perf_event__synthesize_build_id(struct dso *pos, u16 misc,
				    perf_event__handler_t process,
				    struct machine *machine,
				    struct perf_session *session)
{
	union perf_event ev;
	size_t len;
	int err = 0;

	if (!pos->hit)
		return err;

	memset(&ev, 0, sizeof(ev));

	len = pos->long_name_len + 1;
	len = ALIGN(len, NAME_ALIGN);
	memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
	ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
	ev.build_id.header.misc = misc;
	ev.build_id.pid = machine->pid;
	ev.build_id.header.size = sizeof(ev.build_id) + len;
	memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);

	err = process(&ev, NULL, session);

	return err;
}

int perf_event__process_build_id(union perf_event *event,
				 struct perf_session *session)
{
	__event_process_build_id(&event->build_id,
				 event->build_id.filename,
				 session);
	return 0;
}

void disable_buildid_cache(void)
{
	no_buildid_cache = true;
}