header.c

#define _FILE_OFFSET_BITS 64

#include "util.h"
#include <sys/types.h>
#include <byteswap.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <sys/utsname.h>

#include "evlist.h"
#include "evsel.h"
#include "header.h"
#include "../perf.h"
#include "trace-event.h"
#include "session.h"
#include "symbol.h"
#include "debug.h"
#include "cpumap.h"

static bool no_buildid_cache = false;

static int event_count;
static struct perf_trace_event_type *events;

static u32 header_argc;
static const char **header_argv;

static int dsos__write_buildid_table(struct perf_header *header, int fd);
static int perf_session__cache_build_ids(struct perf_session *session);

int perf_header__push_event(u64 id, const char *name)
{
	if (strlen(name) > MAX_EVENT_NAME)
		pr_warning("Event %s will be truncated\n", name);

	if (!events) {
		events = malloc(sizeof(struct perf_trace_event_type));
		if (events == NULL)
			return -ENOMEM;
	} else {
		struct perf_trace_event_type *nevents;

		nevents = realloc(events, (event_count + 1) * sizeof(*events));
		if (nevents == NULL)
			return -ENOMEM;
		events = nevents;
	}
	memset(&events[event_count], 0, sizeof(struct perf_trace_event_type));
	events[event_count].event_id = id;
	strncpy(events[event_count].name, name, MAX_EVENT_NAME - 1);
	event_count++;
	return 0;
}

char *perf_header__find_event(u64 id)
{
	int i;

	for (i = 0 ; i < event_count; i++) {
		if (events[i].event_id == id)
			return events[i].name;
	}
	return NULL;
}

static const char *__perf_magic = "PERFFILE";

#define PERF_MAGIC	(*(u64 *)__perf_magic)

struct perf_file_attr {
	struct perf_event_attr	attr;
	struct perf_file_section	ids;
};

void perf_header__set_feat(struct perf_header *header, int feat)
{
	set_bit(feat, header->adds_features);
}

void perf_header__clear_feat(struct perf_header *header, int feat)
{
	clear_bit(feat, header->adds_features);
}

bool perf_header__has_feat(const struct perf_header *header, int feat)
{
	return test_bit(feat, header->adds_features);
}

static int do_write(int fd, const void *buf, size_t size)
{
	while (size) {
		int ret = write(fd, buf, size);

		if (ret < 0)
			return -errno;

		size -= ret;
		buf += ret;
	}

	return 0;
}

#define NAME_ALIGN 64

static int write_padded(int fd, const void *bf, size_t count,
			size_t count_aligned)
{
	static const char zero_buf[NAME_ALIGN];
	int err = do_write(fd, bf, count);

	if (!err)
		err = do_write(fd, zero_buf, count_aligned - count);

	return err;
}

static int do_write_string(int fd, const char *str)
{
	u32 len, olen;
	int ret;

	olen = strlen(str) + 1;
	len = ALIGN(olen, NAME_ALIGN);

	/* write len, incl. \0 */
	ret = do_write(fd, &len, sizeof(len));
	if (ret < 0)
		return ret;

	return write_padded(fd, str, olen, len);
}
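
/*
 * On-disk layout produced by do_write_string(), and consumed by
 * do_read_string() below:
 *
 *	u32  len;	aligned length, including the trailing \0
 *	char str[len];	the string itself, zero padded up to len
 *
 * len is always a multiple of NAME_ALIGN (64), so readers may see
 * trailing NUL bytes after the actual string.
 */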

static char *do_read_string(int fd, struct perf_header *ph)
{
	ssize_t sz, ret;
	u32 len;
	char *buf;

	sz = read(fd, &len, sizeof(len));
	if (sz < (ssize_t)sizeof(len))
		return NULL;

	if (ph->needs_swap)
		len = bswap_32(len);

	buf = malloc(len);
	if (!buf)
		return NULL;

	ret = read(fd, buf, len);
	if (ret == (ssize_t)len) {
		/*
		 * strings are padded by zeroes
		 * thus the actual strlen of buf
		 * may be less than len
		 */
		return buf;
	}

	free(buf);
	return NULL;
}

int
perf_header__set_cmdline(int argc, const char **argv)
{
	int i;

	header_argc = (u32)argc;

	/* do not include NULL termination */
	header_argv = calloc(argc, sizeof(char *));
	if (!header_argv)
		return -ENOMEM;

	/*
	 * must copy argv contents because it gets moved
	 * around during option parsing
	 */
	for (i = 0; i < argc ; i++)
		header_argv[i] = argv[i];

	return 0;
}

static int write_trace_info(int fd, struct perf_header *h __used,
			    struct perf_evlist *evlist)
{
	return read_tracing_data(fd, &evlist->entries);
}

static int write_build_id(int fd, struct perf_header *h,
			  struct perf_evlist *evlist __used)
{
	struct perf_session *session;
	int err;

	session = container_of(h, struct perf_session, header);

	err = dsos__write_buildid_table(h, fd);
	if (err < 0) {
		pr_debug("failed to write buildid table\n");
		return err;
	}
	if (!no_buildid_cache)
		perf_session__cache_build_ids(session);

	return 0;
}

static int write_hostname(int fd, struct perf_header *h __used,
			  struct perf_evlist *evlist __used)
{
	struct utsname uts;
	int ret;

	ret = uname(&uts);
	if (ret < 0)
		return -1;

	return do_write_string(fd, uts.nodename);
}

static int write_osrelease(int fd, struct perf_header *h __used,
			   struct perf_evlist *evlist __used)
{
	struct utsname uts;
	int ret;

	ret = uname(&uts);
	if (ret < 0)
		return -1;

	return do_write_string(fd, uts.release);
}

static int write_arch(int fd, struct perf_header *h __used,
		      struct perf_evlist *evlist __used)
{
	struct utsname uts;
	int ret;

	ret = uname(&uts);
	if (ret < 0)
		return -1;

	return do_write_string(fd, uts.machine);
}

static int write_version(int fd, struct perf_header *h __used,
			 struct perf_evlist *evlist __used)
{
	return do_write_string(fd, perf_version_string);
}

static int write_cpudesc(int fd, struct perf_header *h __used,
			 struct perf_evlist *evlist __used)
{
#ifndef CPUINFO_PROC
#define CPUINFO_PROC NULL
#endif
	FILE *file;
	char *buf = NULL;
	char *s, *p;
	const char *search = CPUINFO_PROC;
	size_t len = 0;
	int ret = -1;

	if (!search)
		return -1;

	file = fopen("/proc/cpuinfo", "r");
	if (!file)
		return -1;

	while (getline(&buf, &len, file) > 0) {
		ret = strncmp(buf, search, strlen(search));
		if (!ret)
			break;
	}

	if (ret)
		goto done;

	s = buf;

	p = strchr(buf, ':');
	if (p && *(p+1) == ' ' && *(p+2))
		s = p + 2;
	p = strchr(s, '\n');
	if (p)
		*p = '\0';

	/* squash extra space characters (branding string) */
	p = s;
	while (*p) {
		if (isspace(*p)) {
			char *r = p + 1;
			char *q = r;
			*p = ' ';
			while (*q && isspace(*q))
				q++;
			if (q != (p+1))
				while ((*r++ = *q++));
		}
		p++;
	}
	ret = do_write_string(fd, s);
done:
	free(buf);
	fclose(file);
	return ret;
}

static int write_nrcpus(int fd, struct perf_header *h __used,
			struct perf_evlist *evlist __used)
{
	long nr;
	u32 nrc, nra;
	int ret;

	nr = sysconf(_SC_NPROCESSORS_CONF);
	if (nr < 0)
		return -1;

	nrc = (u32)(nr & UINT_MAX);

	nr = sysconf(_SC_NPROCESSORS_ONLN);
	if (nr < 0)
		return -1;

	nra = (u32)(nr & UINT_MAX);

	ret = do_write(fd, &nrc, sizeof(nrc));
	if (ret < 0)
		return ret;

	return do_write(fd, &nra, sizeof(nra));
}
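
/*
 * The HEADER_NRCPUS section is therefore two consecutive u32 values:
 * the configured CPU count (_SC_NPROCESSORS_CONF) followed by the
 * online CPU count (_SC_NPROCESSORS_ONLN).  print_nrcpus() below reads
 * them back in the same order.
 */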

static int write_event_desc(int fd, struct perf_header *h __used,
			    struct perf_evlist *evlist)
{
	struct perf_evsel *attr;
	u32 nre = 0, nri, sz;
	int ret;

	list_for_each_entry(attr, &evlist->entries, node)
		nre++;

	/*
	 * write number of events
	 */
	ret = do_write(fd, &nre, sizeof(nre));
	if (ret < 0)
		return ret;

	/*
	 * size of perf_event_attr struct
	 */
	sz = (u32)sizeof(attr->attr);
	ret = do_write(fd, &sz, sizeof(sz));
	if (ret < 0)
		return ret;

	list_for_each_entry(attr, &evlist->entries, node) {

		ret = do_write(fd, &attr->attr, sz);
		if (ret < 0)
			return ret;
		/*
		 * write number of unique id per event
		 * there is one id per instance of an event
		 *
		 * copy into an nri to be independent of the
		 * type of ids,
		 */
		nri = attr->ids;
		ret = do_write(fd, &nri, sizeof(nri));
		if (ret < 0)
			return ret;

		/*
		 * write event string as passed on cmdline
		 */
		ret = do_write_string(fd, attr->name);
		if (ret < 0)
			return ret;
		/*
		 * write unique ids for this event
		 */
		ret = do_write(fd, attr->id, attr->ids * sizeof(u64));
		if (ret < 0)
			return ret;
	}
	return 0;
}
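
/*
 * Resulting HEADER_EVENT_DESC layout, as read back by print_event_desc():
 *
 *	u32 nre;		number of events
 *	u32 sz;			sizeof(struct perf_event_attr)
 *	then, per event:
 *		struct perf_event_attr attr;	sz bytes
 *		u32 nr_ids;
 *		string name;			do_write_string() format
 *		u64 ids[nr_ids];
 */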

static int write_cmdline(int fd, struct perf_header *h __used,
			 struct perf_evlist *evlist __used)
{
	char buf[MAXPATHLEN];
	char proc[32];
	u32 i, n;
	int ret;

	/*
	 * actual path to perf binary
	 */
	sprintf(proc, "/proc/%d/exe", getpid());
	ret = readlink(proc, buf, sizeof(buf));
	if (ret <= 0)
		return -1;

	/* readlink() does not add null termination */
	buf[ret] = '\0';

	/* account for binary path */
	n = header_argc + 1;

	ret = do_write(fd, &n, sizeof(n));
	if (ret < 0)
		return ret;

	ret = do_write_string(fd, buf);
	if (ret < 0)
		return ret;

	for (i = 0 ; i < header_argc; i++) {
		ret = do_write_string(fd, header_argv[i]);
		if (ret < 0)
			return ret;
	}
	return 0;
}

#define CORE_SIB_FMT \
	"/sys/devices/system/cpu/cpu%d/topology/core_siblings_list"
#define THRD_SIB_FMT \
	"/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list"

struct cpu_topo {
	u32 core_sib;
	u32 thread_sib;
	char **core_siblings;
	char **thread_siblings;
};

static int build_cpu_topo(struct cpu_topo *tp, int cpu)
{
	FILE *fp;
	char filename[MAXPATHLEN];
	char *buf = NULL, *p;
	size_t len = 0;
	u32 i = 0;
	int ret = -1;

	sprintf(filename, CORE_SIB_FMT, cpu);
	fp = fopen(filename, "r");
	if (!fp)
		return -1;

	if (getline(&buf, &len, fp) <= 0)
		goto done;

	fclose(fp);

	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	for (i = 0; i < tp->core_sib; i++) {
		if (!strcmp(buf, tp->core_siblings[i]))
			break;
	}
	if (i == tp->core_sib) {
		tp->core_siblings[i] = buf;
		tp->core_sib++;
		buf = NULL;
		len = 0;
	}

	sprintf(filename, THRD_SIB_FMT, cpu);
	fp = fopen(filename, "r");
	if (!fp)
		goto done;

	if (getline(&buf, &len, fp) <= 0)
		goto done;

	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	for (i = 0; i < tp->thread_sib; i++) {
		if (!strcmp(buf, tp->thread_siblings[i]))
			break;
	}
	if (i == tp->thread_sib) {
		tp->thread_siblings[i] = buf;
		tp->thread_sib++;
		buf = NULL;
	}
	ret = 0;
done:
	if (fp)
		fclose(fp);
	free(buf);
	return ret;
}

static void free_cpu_topo(struct cpu_topo *tp)
{
	u32 i;

	if (!tp)
		return;

	for (i = 0 ; i < tp->core_sib; i++)
		free(tp->core_siblings[i]);

	for (i = 0 ; i < tp->thread_sib; i++)
		free(tp->thread_siblings[i]);

	free(tp);
}

static struct cpu_topo *build_cpu_topology(void)
{
	struct cpu_topo *tp;
	void *addr;
	u32 nr, i;
	size_t sz;
	long ncpus;
	int ret = -1;

	ncpus = sysconf(_SC_NPROCESSORS_CONF);
	if (ncpus < 0)
		return NULL;

	nr = (u32)(ncpus & UINT_MAX);

	sz = nr * sizeof(char *);

	addr = calloc(1, sizeof(*tp) + 2 * sz);
	if (!addr)
		return NULL;

	tp = addr;

	addr += sizeof(*tp);
	tp->core_siblings = addr;
	addr += sz;
	tp->thread_siblings = addr;

	for (i = 0; i < nr; i++) {
		ret = build_cpu_topo(tp, i);
		if (ret < 0)
			break;
	}
	if (ret) {
		free_cpu_topo(tp);
		tp = NULL;
	}
	return tp;
}

static int write_cpu_topology(int fd, struct perf_header *h __used,
			      struct perf_evlist *evlist __used)
{
	struct cpu_topo *tp;
	u32 i;
	int ret;

	tp = build_cpu_topology();
	if (!tp)
		return -1;

	ret = do_write(fd, &tp->core_sib, sizeof(tp->core_sib));
	if (ret < 0)
		goto done;

	for (i = 0; i < tp->core_sib; i++) {
		ret = do_write_string(fd, tp->core_siblings[i]);
		if (ret < 0)
			goto done;
	}
	ret = do_write(fd, &tp->thread_sib, sizeof(tp->thread_sib));
	if (ret < 0)
		goto done;

	for (i = 0; i < tp->thread_sib; i++) {
		ret = do_write_string(fd, tp->thread_siblings[i]);
		if (ret < 0)
			break;
	}
done:
	free_cpu_topo(tp);
	return ret;
}
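
/*
 * HEADER_CPU_TOPOLOGY layout:
 *
 *	u32 nr_core_siblings;
 *	string core_siblings_list[nr_core_siblings];
 *	u32 nr_thread_siblings;
 *	string thread_siblings_list[nr_thread_siblings];
 *
 * The sibling lists are taken verbatim from sysfs (e.g. "0-3") and
 * deduplicated across CPUs by build_cpu_topo() above.
 */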

static int write_total_mem(int fd, struct perf_header *h __used,
			   struct perf_evlist *evlist __used)
{
	char *buf = NULL;
	FILE *fp;
	size_t len = 0;
	int ret = -1, n;
	uint64_t mem;

	fp = fopen("/proc/meminfo", "r");
	if (!fp)
		return -1;

	while (getline(&buf, &len, fp) > 0) {
		ret = strncmp(buf, "MemTotal:", 9);
		if (!ret)
			break;
	}
	if (!ret) {
		n = sscanf(buf, "%*s %"PRIu64, &mem);
		if (n == 1)
			ret = do_write(fd, &mem, sizeof(mem));
	}
	free(buf);
	fclose(fp);
	return ret;
}

static int write_topo_node(int fd, int node)
{
	char str[MAXPATHLEN];
	char field[32];
	char *buf = NULL, *p;
	size_t len = 0;
	FILE *fp;
	u64 mem_total, mem_free, mem;
	int ret = -1;

	sprintf(str, "/sys/devices/system/node/node%d/meminfo", node);
	fp = fopen(str, "r");
	if (!fp)
		return -1;

	while (getline(&buf, &len, fp) > 0) {
		/* skip over invalid lines */
		if (!strchr(buf, ':'))
			continue;
		if (sscanf(buf, "%*s %*d %s %"PRIu64, field, &mem) != 2)
			goto done;
		if (!strcmp(field, "MemTotal:"))
			mem_total = mem;
		if (!strcmp(field, "MemFree:"))
			mem_free = mem;
	}

	fclose(fp);

	ret = do_write(fd, &mem_total, sizeof(u64));
	if (ret)
		goto done;

	ret = do_write(fd, &mem_free, sizeof(u64));
	if (ret)
		goto done;

	ret = -1;
	sprintf(str, "/sys/devices/system/node/node%d/cpulist", node);

	fp = fopen(str, "r");
	if (!fp)
		goto done;

	if (getline(&buf, &len, fp) <= 0)
		goto done;

	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	ret = do_write_string(fd, buf);
done:
	free(buf);
	fclose(fp);
	return ret;
}
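
/*
 * Each NUMA node record written by write_topo_node() is:
 *
 *	u64 mem_total;		MemTotal from the node's meminfo, in kB
 *	u64 mem_free;		MemFree from the node's meminfo, in kB
 *	string cpulist;		contents of node<N>/cpulist
 *
 * write_numa_topology() below prefixes the whole section with a u32
 * node count, and each record with a u32 node number.
 */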

static int write_numa_topology(int fd, struct perf_header *h __used,
			       struct perf_evlist *evlist __used)
{
	char *buf = NULL;
	size_t len = 0;
	FILE *fp;
	struct cpu_map *node_map = NULL;
	char *c;
	u32 nr, i, j;
	int ret = -1;

	fp = fopen("/sys/devices/system/node/online", "r");
	if (!fp)
		return -1;

	if (getline(&buf, &len, fp) <= 0)
		goto done;

	c = strchr(buf, '\n');
	if (c)
		*c = '\0';

	node_map = cpu_map__new(buf);
	if (!node_map)
		goto done;

	nr = (u32)node_map->nr;

	ret = do_write(fd, &nr, sizeof(nr));
	if (ret < 0)
		goto done;

	for (i = 0; i < nr; i++) {
		j = (u32)node_map->map[i];
		ret = do_write(fd, &j, sizeof(j));
		if (ret < 0)
			break;

		ret = write_topo_node(fd, i);
		if (ret < 0)
			break;
	}
done:
	free(buf);
	fclose(fp);
	free(node_map);
	return ret;
}

/*
 * default get_cpuid(): nothing gets recorded
 * actual implementation must be in arch/$(ARCH)/util/header.c
 */
int __attribute__((weak)) get_cpuid(char *buffer __used, size_t sz __used)
{
	return -1;
}

static int write_cpuid(int fd, struct perf_header *h __used,
		       struct perf_evlist *evlist __used)
{
	char buffer[64];
	int ret;

	ret = get_cpuid(buffer, sizeof(buffer));
	if (!ret)
		goto write_it;

	return -1;
write_it:
	return do_write_string(fd, buffer);
}

static void print_hostname(struct perf_header *ph, int fd, FILE *fp)
{
	char *str = do_read_string(fd, ph);
	fprintf(fp, "# hostname : %s\n", str);
	free(str);
}

static void print_osrelease(struct perf_header *ph, int fd, FILE *fp)
{
	char *str = do_read_string(fd, ph);
	fprintf(fp, "# os release : %s\n", str);
	free(str);
}

static void print_arch(struct perf_header *ph, int fd, FILE *fp)
{
	char *str = do_read_string(fd, ph);
	fprintf(fp, "# arch : %s\n", str);
	free(str);
}

static void print_cpudesc(struct perf_header *ph, int fd, FILE *fp)
{
	char *str = do_read_string(fd, ph);
	fprintf(fp, "# cpudesc : %s\n", str);
	free(str);
}

static void print_nrcpus(struct perf_header *ph, int fd, FILE *fp)
{
	ssize_t ret;
	u32 nr;

	ret = read(fd, &nr, sizeof(nr));
	if (ret != (ssize_t)sizeof(nr))
		nr = -1; /* interpreted as error */

	if (ph->needs_swap)
		nr = bswap_32(nr);

	fprintf(fp, "# nrcpus online : %u\n", nr);

	ret = read(fd, &nr, sizeof(nr));
	if (ret != (ssize_t)sizeof(nr))
		nr = -1; /* interpreted as error */

	if (ph->needs_swap)
		nr = bswap_32(nr);

	fprintf(fp, "# nrcpus avail : %u\n", nr);
}

static void print_version(struct perf_header *ph, int fd, FILE *fp)
{
	char *str = do_read_string(fd, ph);
	fprintf(fp, "# perf version : %s\n", str);
	free(str);
}

static void print_cmdline(struct perf_header *ph, int fd, FILE *fp)
{
	ssize_t ret;
	char *str;
	u32 nr, i;

	ret = read(fd, &nr, sizeof(nr));
	if (ret != (ssize_t)sizeof(nr))
		return;

	if (ph->needs_swap)
		nr = bswap_32(nr);

	fprintf(fp, "# cmdline : ");

	for (i = 0; i < nr; i++) {
		str = do_read_string(fd, ph);
		fprintf(fp, "%s ", str);
		free(str);
	}
	fputc('\n', fp);
}

static void print_cpu_topology(struct perf_header *ph, int fd, FILE *fp)
{
	ssize_t ret;
	u32 nr, i;
	char *str;

	ret = read(fd, &nr, sizeof(nr));
	if (ret != (ssize_t)sizeof(nr))
		return;

	if (ph->needs_swap)
		nr = bswap_32(nr);

	for (i = 0; i < nr; i++) {
		str = do_read_string(fd, ph);
		fprintf(fp, "# sibling cores : %s\n", str);
		free(str);
	}

	ret = read(fd, &nr, sizeof(nr));
	if (ret != (ssize_t)sizeof(nr))
		return;

	if (ph->needs_swap)
		nr = bswap_32(nr);

	for (i = 0; i < nr; i++) {
		str = do_read_string(fd, ph);
		fprintf(fp, "# sibling threads : %s\n", str);
		free(str);
	}
}

static void print_event_desc(struct perf_header *ph, int fd, FILE *fp)
{
	struct perf_event_attr attr;
	uint64_t id;
	void *buf = NULL;
	char *str;
	u32 nre, sz, nr, i, j, msz;
	int ret;

	/* number of events */
	ret = read(fd, &nre, sizeof(nre));
	if (ret != (ssize_t)sizeof(nre))
		goto error;

	if (ph->needs_swap)
		nre = bswap_32(nre);

	ret = read(fd, &sz, sizeof(sz));
	if (ret != (ssize_t)sizeof(sz))
		goto error;

	if (ph->needs_swap)
		sz = bswap_32(sz);

	/*
	 * ensure it is at least to our ABI rev
	 */
	if (sz < (u32)sizeof(attr))
		goto error;

	memset(&attr, 0, sizeof(attr));

	/* read entire region to sync up to next field */
	buf = malloc(sz);
	if (!buf)
		goto error;

	msz = sizeof(attr);
	if (sz < msz)
		msz = sz;

	for (i = 0 ; i < nre; i++) {

		ret = read(fd, buf, sz);
		if (ret != (ssize_t)sz)
			goto error;

		if (ph->needs_swap)
			perf_event__attr_swap(buf);

		memcpy(&attr, buf, msz);

		ret = read(fd, &nr, sizeof(nr));
		if (ret != (ssize_t)sizeof(nr))
			goto error;

		if (ph->needs_swap)
			nr = bswap_32(nr);

		str = do_read_string(fd, ph);
		fprintf(fp, "# event : name = %s, ", str);
		free(str);

		fprintf(fp, "type = %d, config = 0x%"PRIx64
			    ", config1 = 0x%"PRIx64", config2 = 0x%"PRIx64,
			attr.type,
			(u64)attr.config,
			(u64)attr.config1,
			(u64)attr.config2);

		fprintf(fp, ", excl_usr = %d, excl_kern = %d",
			attr.exclude_user,
			attr.exclude_kernel);

		if (nr)
			fprintf(fp, ", id = {");

		for (j = 0 ; j < nr; j++) {
			ret = read(fd, &id, sizeof(id));
			if (ret != (ssize_t)sizeof(id))
				goto error;

			if (ph->needs_swap)
				id = bswap_64(id);

			if (j)
				fputc(',', fp);

			fprintf(fp, " %"PRIu64, id);
		}
		if (nr && j == nr)
			fprintf(fp, " }");
		fputc('\n', fp);
	}
	free(buf);
	return;
error:
	fprintf(fp, "# event desc: not available or unable to read\n");
}

static void print_total_mem(struct perf_header *h __used, int fd, FILE *fp)
{
	uint64_t mem;
	ssize_t ret;

	ret = read(fd, &mem, sizeof(mem));
	if (ret != sizeof(mem))
		goto error;

	if (h->needs_swap)
		mem = bswap_64(mem);

	fprintf(fp, "# total memory : %"PRIu64" kB\n", mem);
	return;
error:
	fprintf(fp, "# total memory : unknown\n");
}

static void print_numa_topology(struct perf_header *h __used, int fd, FILE *fp)
{
	ssize_t ret;
	u32 nr, c, i;
	char *str;
	uint64_t mem_total, mem_free;

	/* nr nodes */
	ret = read(fd, &nr, sizeof(nr));
	if (ret != (ssize_t)sizeof(nr))
		goto error;

	if (h->needs_swap)
		nr = bswap_32(nr);

	for (i = 0; i < nr; i++) {

		/* node number */
		ret = read(fd, &c, sizeof(c));
		if (ret != (ssize_t)sizeof(c))
			goto error;

		if (h->needs_swap)
			c = bswap_32(c);

		ret = read(fd, &mem_total, sizeof(u64));
		if (ret != sizeof(u64))
			goto error;

		ret = read(fd, &mem_free, sizeof(u64));
		if (ret != sizeof(u64))
			goto error;

		if (h->needs_swap) {
			mem_total = bswap_64(mem_total);
			mem_free = bswap_64(mem_free);
		}

		fprintf(fp, "# node%u meminfo : total = %"PRIu64" kB,"
			    " free = %"PRIu64" kB\n",
			c,
			mem_total,
			mem_free);

		str = do_read_string(fd, h);
		fprintf(fp, "# node%u cpu list : %s\n", c, str);
		free(str);
	}
	return;
error:
	fprintf(fp, "# numa topology : not available\n");
}

static void print_cpuid(struct perf_header *ph, int fd, FILE *fp)
{
	char *str = do_read_string(fd, ph);
	fprintf(fp, "# cpuid : %s\n", str);
	free(str);
}

struct feature_ops {
	int (*write)(int fd, struct perf_header *h, struct perf_evlist *evlist);
	void (*print)(struct perf_header *h, int fd, FILE *fp);
	const char *name;
	bool full_only;
};

#define FEAT_OPA(n, w, p) \
	[n] = { .name = #n, .write = w, .print = p }
#define FEAT_OPF(n, w, p) \
	[n] = { .name = #n, .write = w, .print = p, .full_only = true }

static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
	FEAT_OPA(HEADER_TRACE_INFO, write_trace_info, NULL),
	FEAT_OPA(HEADER_BUILD_ID, write_build_id, NULL),
	FEAT_OPA(HEADER_HOSTNAME, write_hostname, print_hostname),
	FEAT_OPA(HEADER_OSRELEASE, write_osrelease, print_osrelease),
	FEAT_OPA(HEADER_VERSION, write_version, print_version),
	FEAT_OPA(HEADER_ARCH, write_arch, print_arch),
	FEAT_OPA(HEADER_NRCPUS, write_nrcpus, print_nrcpus),
	FEAT_OPA(HEADER_CPUDESC, write_cpudesc, print_cpudesc),
	FEAT_OPA(HEADER_CPUID, write_cpuid, print_cpuid),
	FEAT_OPA(HEADER_TOTAL_MEM, write_total_mem, print_total_mem),
	FEAT_OPA(HEADER_EVENT_DESC, write_event_desc, print_event_desc),
	FEAT_OPA(HEADER_CMDLINE, write_cmdline, print_cmdline),
	FEAT_OPF(HEADER_CPU_TOPOLOGY, write_cpu_topology, print_cpu_topology),
	FEAT_OPF(HEADER_NUMA_TOPOLOGY, write_numa_topology, print_numa_topology),
};
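
/*
 * A rough sketch of the convention used here for adding a new feature
 * (not an exhaustive checklist): add a HEADER_* bit before
 * HEADER_LAST_FEATURE in util/header.h, provide a write_*() handler and
 * optionally a print_*() handler, add a FEAT_OPA()/FEAT_OPF() entry to
 * feat_ops above, and call do_write_feat() for it from
 * perf_header__adds_write() below.
 */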

struct header_print_data {
	FILE *fp;
	bool full; /* extended list of headers */
};

static int perf_file_section__fprintf_info(struct perf_file_section *section,
					   struct perf_header *ph,
					   int feat, int fd, void *data)
{
	struct header_print_data *hd = data;

	if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
		pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
			 "%d, continuing...\n", section->offset, feat);
		return 0;
	}
	if (feat < HEADER_TRACE_INFO || feat >= HEADER_LAST_FEATURE) {
		pr_warning("unknown feature %d\n", feat);
		return -1;
	}
	if (!feat_ops[feat].print)
		return 0;

	if (!feat_ops[feat].full_only || hd->full)
		feat_ops[feat].print(ph, fd, hd->fp);
	else
		fprintf(hd->fp, "# %s info available, use -I to display\n",
			feat_ops[feat].name);

	return 0;
}

int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
{
	struct header_print_data hd;
	struct perf_header *header = &session->header;
	int fd = session->fd;

	hd.fp = fp;
	hd.full = full;

	perf_header__process_sections(header, fd, &hd,
				      perf_file_section__fprintf_info);
	return 0;
}

#define dsos__for_each_with_build_id(pos, head)	\
	list_for_each_entry(pos, head, node)		\
		if (!pos->has_build_id)			\
			continue;			\
		else

static int __dsos__write_buildid_table(struct list_head *head, pid_t pid,
				       u16 misc, int fd)
{
	struct dso *pos;

	dsos__for_each_with_build_id(pos, head) {
		int err;
		struct build_id_event b;
		size_t len;

		if (!pos->hit)
			continue;
		len = pos->long_name_len + 1;
		len = ALIGN(len, NAME_ALIGN);
		memset(&b, 0, sizeof(b));
		memcpy(&b.build_id, pos->build_id, sizeof(pos->build_id));
		b.pid = pid;
		b.header.misc = misc;
		b.header.size = sizeof(b) + len;
		err = do_write(fd, &b, sizeof(b));
		if (err < 0)
			return err;
		err = write_padded(fd, pos->long_name,
				   pos->long_name_len + 1, len);
		if (err < 0)
			return err;
	}

	return 0;
}
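
/*
 * Each entry in the build-id table is a struct build_id_event followed
 * by the DSO's long name, NUL terminated and zero padded to NAME_ALIGN;
 * b.header.size covers both parts, which is what lets readers walk the
 * table record by record in perf_header__read_build_ids().
 */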

static int machine__write_buildid_table(struct machine *machine, int fd)
{
	int err;
	u16 kmisc = PERF_RECORD_MISC_KERNEL,
	    umisc = PERF_RECORD_MISC_USER;

	if (!machine__is_host(machine)) {
		kmisc = PERF_RECORD_MISC_GUEST_KERNEL;
		umisc = PERF_RECORD_MISC_GUEST_USER;
	}

	err = __dsos__write_buildid_table(&machine->kernel_dsos, machine->pid,
					  kmisc, fd);
	if (err == 0)
		err = __dsos__write_buildid_table(&machine->user_dsos,
						  machine->pid, umisc, fd);
	return err;
}

static int dsos__write_buildid_table(struct perf_header *header, int fd)
{
	struct perf_session *session = container_of(header,
			struct perf_session, header);
	struct rb_node *nd;
	int err = machine__write_buildid_table(&session->host_machine, fd);

	if (err)
		return err;

	for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		err = machine__write_buildid_table(pos, fd);
		if (err)
			break;
	}
	return err;
}

int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
			  const char *name, bool is_kallsyms)
{
	const size_t size = PATH_MAX;
	char *realname, *filename = zalloc(size),
	     *linkname = zalloc(size), *targetname;
	int len, err = -1;

	if (is_kallsyms) {
		if (symbol_conf.kptr_restrict) {
			pr_debug("Not caching a kptr_restrict'ed /proc/kallsyms\n");
			return 0;
		}
		realname = (char *)name;
	} else
		realname = realpath(name, NULL);

	if (realname == NULL || filename == NULL || linkname == NULL)
		goto out_free;

	len = snprintf(filename, size, "%s%s%s",
		       debugdir, is_kallsyms ? "/" : "", realname);

	if (mkdir_p(filename, 0755))
		goto out_free;

	/* filename is heap allocated, so use the buffer size, not sizeof() */
	snprintf(filename + len, size - len, "/%s", sbuild_id);

	if (access(filename, F_OK)) {
		if (is_kallsyms) {
			if (copyfile("/proc/kallsyms", filename))
				goto out_free;
		} else if (link(realname, filename) && copyfile(name, filename))
			goto out_free;
	}

	len = snprintf(linkname, size, "%s/.build-id/%.2s",
		       debugdir, sbuild_id);

	if (access(linkname, X_OK) && mkdir_p(linkname, 0755))
		goto out_free;

	snprintf(linkname + len, size - len, "/%s", sbuild_id + 2);
	targetname = filename + strlen(debugdir) - 5;
	memcpy(targetname, "../..", 5);

	if (symlink(targetname, linkname) == 0)
		err = 0;
out_free:
	if (!is_kallsyms)
		free(realname);
	free(filename);
	free(linkname);
	return err;
}
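
/*
 * The cache layout this creates under debugdir (typically ~/.debug,
 * see buildid_dir) looks roughly like:
 *
 *	<debugdir>/<object path>/<sbuild_id>	copy or hard link of the DSO
 *	<debugdir>/.build-id/<xx>/<rest>	relative symlink, via ../..,
 *						back to the entry above
 *
 * where <xx> is the first two hex characters of the build id and <rest>
 * the remaining ones.
 */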

static int build_id_cache__add_b(const u8 *build_id, size_t build_id_size,
				 const char *name, const char *debugdir,
				 bool is_kallsyms)
{
	char sbuild_id[BUILD_ID_SIZE * 2 + 1];

	build_id__sprintf(build_id, build_id_size, sbuild_id);

	return build_id_cache__add_s(sbuild_id, debugdir, name, is_kallsyms);
}

int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir)
{
	const size_t size = PATH_MAX;
	char *filename = zalloc(size),
	     *linkname = zalloc(size);
	int err = -1;

	if (filename == NULL || linkname == NULL)
		goto out_free;

	snprintf(linkname, size, "%s/.build-id/%.2s/%s",
		 debugdir, sbuild_id, sbuild_id + 2);

	if (access(linkname, F_OK))
		goto out_free;

	if (readlink(linkname, filename, size - 1) < 0)
		goto out_free;

	if (unlink(linkname))
		goto out_free;

	/*
	 * Since the link is relative, we must make it absolute:
	 */
	snprintf(linkname, size, "%s/.build-id/%.2s/%s",
		 debugdir, sbuild_id, filename);

	if (unlink(linkname))
		goto out_free;

	err = 0;
out_free:
	free(filename);
	free(linkname);
	return err;
}

static int dso__cache_build_id(struct dso *dso, const char *debugdir)
{
	bool is_kallsyms = dso->kernel && dso->long_name[0] != '/';

	return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id),
				     dso->long_name, debugdir, is_kallsyms);
}

static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir)
{
	struct dso *pos;
	int err = 0;

	dsos__for_each_with_build_id(pos, head)
		if (dso__cache_build_id(pos, debugdir))
			err = -1;

	return err;
}

static int machine__cache_build_ids(struct machine *machine, const char *debugdir)
{
	int ret = __dsos__cache_build_ids(&machine->kernel_dsos, debugdir);
	ret |= __dsos__cache_build_ids(&machine->user_dsos, debugdir);
	return ret;
}

static int perf_session__cache_build_ids(struct perf_session *session)
{
	struct rb_node *nd;
	int ret;
	char debugdir[PATH_MAX];

	snprintf(debugdir, sizeof(debugdir), "%s", buildid_dir);

	if (mkdir(debugdir, 0755) != 0 && errno != EEXIST)
		return -1;

	ret = machine__cache_build_ids(&session->host_machine, debugdir);

	for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret |= machine__cache_build_ids(pos, debugdir);
	}
	return ret ? -1 : 0;
}

static bool machine__read_build_ids(struct machine *machine, bool with_hits)
{
	bool ret = __dsos__read_build_ids(&machine->kernel_dsos, with_hits);
	ret |= __dsos__read_build_ids(&machine->user_dsos, with_hits);
	return ret;
}

static bool perf_session__read_build_ids(struct perf_session *session, bool with_hits)
{
	struct rb_node *nd;
	bool ret = machine__read_build_ids(&session->host_machine, with_hits);

	for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret |= machine__read_build_ids(pos, with_hits);
	}

	return ret;
}

static int do_write_feat(int fd, struct perf_header *h, int type,
			 struct perf_file_section **p,
			 struct perf_evlist *evlist)
{
	int err;
	int ret = 0;

	if (perf_header__has_feat(h, type)) {

		(*p)->offset = lseek(fd, 0, SEEK_CUR);

		err = feat_ops[type].write(fd, h, evlist);
		if (err < 0) {
			pr_debug("failed to write feature %d\n", type);

			/* undo anything written */
			lseek(fd, (*p)->offset, SEEK_SET);

			return -1;
		}
		(*p)->size = lseek(fd, 0, SEEK_CUR) - (*p)->offset;
		(*p)++;
	}
	return ret;
}
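
/*
 * do_write_feat() appends the feature payload at the current file
 * position and fills in one struct perf_file_section (offset/size) in
 * the caller's table; the table itself is only written out afterwards,
 * by perf_header__adds_write() below.
 */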

static int perf_header__adds_write(struct perf_header *header,
				   struct perf_evlist *evlist, int fd)
{
	int nr_sections;
	struct perf_session *session;
	struct perf_file_section *feat_sec, *p;
	int sec_size;
	u64 sec_start;
	int err;

	session = container_of(header, struct perf_session, header);

	if (perf_header__has_feat(header, HEADER_BUILD_ID) &&
	    !perf_session__read_build_ids(session, true))
		perf_header__clear_feat(header, HEADER_BUILD_ID);

	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
	if (!nr_sections)
		return 0;

	feat_sec = p = calloc(sizeof(*feat_sec), nr_sections);
	if (feat_sec == NULL)
		return -ENOMEM;

	sec_size = sizeof(*feat_sec) * nr_sections;

	sec_start = header->data_offset + header->data_size;
	lseek(fd, sec_start + sec_size, SEEK_SET);

	err = do_write_feat(fd, header, HEADER_TRACE_INFO, &p, evlist);
	if (err)
		goto out_free;

	err = do_write_feat(fd, header, HEADER_BUILD_ID, &p, evlist);
	if (err) {
		perf_header__clear_feat(header, HEADER_BUILD_ID);
		goto out_free;
	}

	err = do_write_feat(fd, header, HEADER_HOSTNAME, &p, evlist);
	if (err)
		perf_header__clear_feat(header, HEADER_HOSTNAME);

	err = do_write_feat(fd, header, HEADER_OSRELEASE, &p, evlist);
	if (err)
		perf_header__clear_feat(header, HEADER_OSRELEASE);

	err = do_write_feat(fd, header, HEADER_VERSION, &p, evlist);
	if (err)
		perf_header__clear_feat(header, HEADER_VERSION);

	err = do_write_feat(fd, header, HEADER_ARCH, &p, evlist);
	if (err)
		perf_header__clear_feat(header, HEADER_ARCH);

	err = do_write_feat(fd, header, HEADER_NRCPUS, &p, evlist);
	if (err)
		perf_header__clear_feat(header, HEADER_NRCPUS);

	err = do_write_feat(fd, header, HEADER_CPUDESC, &p, evlist);
	if (err)
		perf_header__clear_feat(header, HEADER_CPUDESC);

	err = do_write_feat(fd, header, HEADER_CPUID, &p, evlist);
	if (err)
		perf_header__clear_feat(header, HEADER_CPUID);

	err = do_write_feat(fd, header, HEADER_TOTAL_MEM, &p, evlist);
	if (err)
		perf_header__clear_feat(header, HEADER_TOTAL_MEM);

	err = do_write_feat(fd, header, HEADER_CMDLINE, &p, evlist);
	if (err)
		perf_header__clear_feat(header, HEADER_CMDLINE);

	err = do_write_feat(fd, header, HEADER_EVENT_DESC, &p, evlist);
	if (err)
		perf_header__clear_feat(header, HEADER_EVENT_DESC);

	err = do_write_feat(fd, header, HEADER_CPU_TOPOLOGY, &p, evlist);
	if (err)
		perf_header__clear_feat(header, HEADER_CPU_TOPOLOGY);

	err = do_write_feat(fd, header, HEADER_NUMA_TOPOLOGY, &p, evlist);
	if (err)
		perf_header__clear_feat(header, HEADER_NUMA_TOPOLOGY);

	lseek(fd, sec_start, SEEK_SET);
	/*
	 * may write more than needed due to dropped feature, but
	 * this is okay, reader will skip the missing entries
	 */
	err = do_write(fd, feat_sec, sec_size);
	if (err < 0)
		pr_debug("failed to write feature section\n");
out_free:
	free(feat_sec);
	return err;
}
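
/*
 * The feature area therefore starts at data_offset + data_size with a
 * table of nr_sections struct perf_file_section entries, followed by
 * the feature payloads themselves; the table is seeked over first and
 * written last, once every payload's offset and size is known.
 */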

int perf_header__write_pipe(int fd)
{
	struct perf_pipe_file_header f_header;
	int err;

	f_header = (struct perf_pipe_file_header){
		.magic	   = PERF_MAGIC,
		.size	   = sizeof(f_header),
	};

	err = do_write(fd, &f_header, sizeof(f_header));
	if (err < 0) {
		pr_debug("failed to write perf pipe header\n");
		return err;
	}

	return 0;
}

int perf_session__write_header(struct perf_session *session,
			       struct perf_evlist *evlist,
			       int fd, bool at_exit)
{
	struct perf_file_header f_header;
	struct perf_file_attr   f_attr;
	struct perf_header *header = &session->header;
	struct perf_evsel *attr, *pair = NULL;
	int err;

	lseek(fd, sizeof(f_header), SEEK_SET);

	if (session->evlist != evlist)
		pair = list_entry(session->evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry(attr, &evlist->entries, node) {
		attr->id_offset = lseek(fd, 0, SEEK_CUR);
		err = do_write(fd, attr->id, attr->ids * sizeof(u64));
		if (err < 0) {
out_err_write:
			pr_debug("failed to write perf header\n");
			return err;
		}
		if (session->evlist != evlist) {
			err = do_write(fd, pair->id, pair->ids * sizeof(u64));
			if (err < 0)
				goto out_err_write;
			attr->ids += pair->ids;
			pair = list_entry(pair->node.next, struct perf_evsel, node);
		}
	}

	header->attr_offset = lseek(fd, 0, SEEK_CUR);

	list_for_each_entry(attr, &evlist->entries, node) {
		f_attr = (struct perf_file_attr){
			.attr = attr->attr,
			.ids  = {
				.offset = attr->id_offset,
				.size   = attr->ids * sizeof(u64),
			}
		};
		err = do_write(fd, &f_attr, sizeof(f_attr));
		if (err < 0) {
			pr_debug("failed to write perf header attribute\n");
			return err;
		}
	}

	header->event_offset = lseek(fd, 0, SEEK_CUR);
	header->event_size = event_count * sizeof(struct perf_trace_event_type);
	if (events) {
		err = do_write(fd, events, header->event_size);
		if (err < 0) {
			pr_debug("failed to write perf header events\n");
			return err;
		}
	}

	header->data_offset = lseek(fd, 0, SEEK_CUR);

	if (at_exit) {
		err = perf_header__adds_write(header, evlist, fd);
		if (err < 0)
			return err;
	}

	f_header = (struct perf_file_header){
		.magic	   = PERF_MAGIC,
		.size	   = sizeof(f_header),
		.attr_size = sizeof(f_attr),
		.attrs = {
			.offset = header->attr_offset,
			.size   = evlist->nr_entries * sizeof(f_attr),
		},
		.data = {
			.offset = header->data_offset,
			.size	= header->data_size,
		},
		.event_types = {
			.offset = header->event_offset,
			.size	= header->event_size,
		},
	};

	memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));

	lseek(fd, 0, SEEK_SET);
	err = do_write(fd, &f_header, sizeof(f_header));
	if (err < 0) {
		pr_debug("failed to write perf header\n");
		return err;
	}
	lseek(fd, header->data_offset + header->data_size, SEEK_SET);

	header->frozen = 1;
	return 0;
}
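
/*
 * Overall perf.data layout as written above:
 *
 *	struct perf_file_header		at offset 0, written last
 *	sample id arrays (u64)		one run per evsel
 *	struct perf_file_attr[]		attrs, pointing at the id runs
 *	struct perf_trace_event_type[]	optional event type table
 *	data				samples, starting at data_offset
 *	feature sections		appended by perf_header__adds_write()
 *					when at_exit is set
 */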

static int perf_header__getbuffer64(struct perf_header *header,
				    int fd, void *buf, size_t size)
{
	if (readn(fd, buf, size) <= 0)
		return -1;

	if (header->needs_swap)
		mem_bswap_64(buf, size);

	return 0;
}

int perf_header__process_sections(struct perf_header *header, int fd,
				  void *data,
				  int (*process)(struct perf_file_section *section,
						 struct perf_header *ph,
						 int feat, int fd, void *data))
{
	struct perf_file_section *feat_sec;
	int nr_sections;
	int sec_size;
	int idx = 0;
	int err = -1, feat = 1;

	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
	if (!nr_sections)
		return 0;

	feat_sec = calloc(sizeof(*feat_sec), nr_sections);
	if (!feat_sec)
		return -1;

	sec_size = sizeof(*feat_sec) * nr_sections;

	lseek(fd, header->data_offset + header->data_size, SEEK_SET);

	if (perf_header__getbuffer64(header, fd, feat_sec, sec_size))
		goto out_free;

	err = 0;
	while (idx < nr_sections && feat < HEADER_LAST_FEATURE) {
		if (perf_header__has_feat(header, feat)) {
			struct perf_file_section *sec = &feat_sec[idx++];

			err = process(sec, header, feat, fd, data);
			if (err < 0)
				break;
		}
		++feat;
	}
out_free:
	free(feat_sec);
	return err;
}

int perf_file_header__read(struct perf_file_header *header,
			   struct perf_header *ph, int fd)
{
	lseek(fd, 0, SEEK_SET);

	if (readn(fd, header, sizeof(*header)) <= 0 ||
	    memcmp(&header->magic, __perf_magic, sizeof(header->magic)))
		return -1;

	if (header->attr_size != sizeof(struct perf_file_attr)) {
		u64 attr_size = bswap_64(header->attr_size);

		if (attr_size != sizeof(struct perf_file_attr))
			return -1;

		mem_bswap_64(header, offsetof(struct perf_file_header,
			     adds_features));
		ph->needs_swap = true;
	}

	if (header->size != sizeof(*header)) {
		/* Support the previous format */
		if (header->size == offsetof(typeof(*header), adds_features))
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
		else
			return -1;
	} else if (ph->needs_swap) {
		unsigned int i;
		/*
		 * feature bitmap is declared as an array of unsigned longs --
		 * not good since its size can differ between the host that
		 * generated the data file and the host analyzing the file.
		 *
		 * We need to handle endianness, but we don't know the size of
		 * the unsigned long where the file was generated. Take a best
		 * guess at determining it: try 64-bit swap first (ie., file
		 * created on a 64-bit host), and check if the hostname feature
		 * bit is set (this feature bit is forced on as of fbe96f2).
		 * If the bit is not, undo the 64-bit swap and try a 32-bit
		 * swap. If the hostname bit is still not set (e.g., older data
		 * file), punt and fallback to the original behavior --
		 * clearing all feature bits and setting buildid.
		 */
		for (i = 0; i < BITS_TO_LONGS(HEADER_FEAT_BITS); ++i)
			header->adds_features[i] = bswap_64(header->adds_features[i]);

		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
			for (i = 0; i < BITS_TO_LONGS(HEADER_FEAT_BITS); ++i) {
				header->adds_features[i] = bswap_64(header->adds_features[i]);
				header->adds_features[i] = bswap_32(header->adds_features[i]);
			}
		}

		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
			set_bit(HEADER_BUILD_ID, header->adds_features);
		}
	}

	memcpy(&ph->adds_features, &header->adds_features,
	       sizeof(ph->adds_features));

	ph->event_offset = header->event_types.offset;
	ph->event_size   = header->event_types.size;
	ph->data_offset  = header->data.offset;
	ph->data_size	 = header->data.size;
	return 0;
}

static int __event_process_build_id(struct build_id_event *bev,
				    char *filename,
				    struct perf_session *session)
{
	int err = -1;
	struct list_head *head;
	struct machine *machine;
	u16 misc;
	struct dso *dso;
	enum dso_kernel_type dso_type;

	machine = perf_session__findnew_machine(session, bev->pid);
	if (!machine)
		goto out;

	misc = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	switch (misc) {
	case PERF_RECORD_MISC_KERNEL:
		dso_type = DSO_TYPE_KERNEL;
		head = &machine->kernel_dsos;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		dso_type = DSO_TYPE_GUEST_KERNEL;
		head = &machine->kernel_dsos;
		break;
	case PERF_RECORD_MISC_USER:
	case PERF_RECORD_MISC_GUEST_USER:
		dso_type = DSO_TYPE_USER;
		head = &machine->user_dsos;
		break;
	default:
		goto out;
	}

	dso = __dsos__findnew(head, filename);
	if (dso != NULL) {
		char sbuild_id[BUILD_ID_SIZE * 2 + 1];

		dso__set_build_id(dso, &bev->build_id);

		if (filename[0] == '[')
			dso->kernel = dso_type;

		build_id__sprintf(dso->build_id, sizeof(dso->build_id),
				  sbuild_id);
		pr_debug("build id event received for %s: %s\n",
			 dso->long_name, sbuild_id);
	}

	err = 0;
out:
	return err;
}
static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
						 int input, u64 offset, u64 size)
{
	struct perf_session *session = container_of(header, struct perf_session, header);
	struct {
		struct perf_event_header header;
		u8 build_id[ALIGN(BUILD_ID_SIZE, sizeof(u64))];
		char filename[0];
	} old_bev;
	struct build_id_event bev;
	char filename[PATH_MAX];
	u64 limit = offset + size;

	while (offset < limit) {
		ssize_t len;

		if (read(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
			return -1;

		if (header->needs_swap)
			perf_event_header__bswap(&old_bev.header);

		len = old_bev.header.size - sizeof(old_bev);
		if (read(input, filename, len) != len)
			return -1;

		bev.header = old_bev.header;

		/*
		 * As the pid is the missing value, we need to fill
		 * it properly. The header.misc value gives us a nice hint.
		 */
		bev.pid = HOST_KERNEL_ID;
		if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
		    bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
			bev.pid = DEFAULT_GUEST_KERNEL_ID;

		memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));

		__event_process_build_id(&bev, filename, session);

		offset += bev.header.size;
	}

	return 0;
}
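
/*
 * Read the HEADER_BUILD_ID feature section.  If the first entry looks like
 * the well-known "[kernel.kallsyms]" name with its first four characters
 * chopped off (where the pid_t now sits), the file was written with the old
 * build_id_event layout, so rewind and reparse it with the ABI quirk reader.
 */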
static int perf_header__read_build_ids(struct perf_header *header,
				       int input, u64 offset, u64 size)
{
	struct perf_session *session = container_of(header, struct perf_session, header);
	struct build_id_event bev;
	char filename[PATH_MAX];
	u64 limit = offset + size, orig_offset = offset;
	int err = -1;

	while (offset < limit) {
		ssize_t len;

		if (read(input, &bev, sizeof(bev)) != sizeof(bev))
			goto out;

		if (header->needs_swap)
			perf_event_header__bswap(&bev.header);

		len = bev.header.size - sizeof(bev);
		if (read(input, filename, len) != len)
			goto out;

		/*
		 * The a1645ce1 changeset:
		 *
		 * "perf: 'perf kvm' tool for monitoring guest performance from host"
		 *
		 * Added a field to struct build_id_event that broke the file
		 * format.
		 *
		 * Since the kernel build-id is the first entry, process the
		 * table using the old format if the well known
		 * '[kernel.kallsyms]' string for the kernel build-id has the
		 * first 4 characters chopped off (where the pid_t sits).
		 */
		if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
			if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
				return -1;
			return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
		}

		__event_process_build_id(&bev, filename, session);

		offset += bev.header.size;
	}
	err = 0;
out:
	return err;
}
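
/*
 * Callback passed to perf_header__process_sections(): called once per feature
 * bit set in adds_features, with the section's offset and size.  Only
 * HEADER_TRACE_INFO and HEADER_BUILD_ID carry data that must be parsed at
 * read time; the other known features are ignored here.
 */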
static int perf_file_section__process(struct perf_file_section *section,
				      struct perf_header *ph,
				      int feat, int fd, void *data __used)
{
	if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
		pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
			 "%d, continuing...\n", section->offset, feat);
		return 0;
	}

	switch (feat) {
	case HEADER_TRACE_INFO:
		trace_report(fd, false);
		break;

	case HEADER_BUILD_ID:
		if (perf_header__read_build_ids(ph, fd, section->offset, section->size))
			pr_debug("Failed to read buildids, continuing...\n");
		break;

	case HEADER_HOSTNAME:
	case HEADER_OSRELEASE:
	case HEADER_VERSION:
	case HEADER_ARCH:
	case HEADER_NRCPUS:
	case HEADER_CPUDESC:
	case HEADER_CPUID:
	case HEADER_TOTAL_MEM:
	case HEADER_CMDLINE:
	case HEADER_EVENT_DESC:
	case HEADER_CPU_TOPOLOGY:
	case HEADER_NUMA_TOPOLOGY:
		break;

	default:
		pr_debug("unknown feature %d, continuing...\n", feat);
	}

	return 0;
}
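
/*
 * Pipe-mode header handling: when the data comes over a pipe there is no
 * seekable perf_file_header, only a small perf_pipe_file_header carrying the
 * magic and its own size.  A size mismatch that disappears after a 64-bit
 * byte swap means the file was written on a host of the other endianness.
 */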
static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
				       struct perf_header *ph, int fd,
				       bool repipe)
{
	if (readn(fd, header, sizeof(*header)) <= 0 ||
	    memcmp(&header->magic, __perf_magic, sizeof(header->magic)))
		return -1;

	if (repipe && do_write(STDOUT_FILENO, header, sizeof(*header)) < 0)
		return -1;

	if (header->size != sizeof(*header)) {
		u64 size = bswap_64(header->size);

		if (size != sizeof(*header))
			return -1;

		ph->needs_swap = true;
	}

	return 0;
}

static int perf_header__read_pipe(struct perf_session *session, int fd)
{
	struct perf_header *header = &session->header;
	struct perf_pipe_file_header f_header;

	if (perf_file_header__read_pipe(&f_header, header, fd,
					session->repipe) < 0) {
		pr_debug("incompatible file format\n");
		return -EINVAL;
	}

	session->fd = fd;

	return 0;
}
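
/*
 * Read a perf.data header: allocate the session's evlist, read the on-disk
 * attrs and their ids to rebuild one evsel per attribute, load any stored
 * trace event types, process the optional feature sections and finally leave
 * the file positioned at the start of the data section.
 */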
int perf_session__read_header(struct perf_session *session, int fd)
{
	struct perf_header *header = &session->header;
	struct perf_file_header f_header;
	struct perf_file_attr f_attr;
	u64 f_id;
	int nr_attrs, nr_ids, i, j;

	session->evlist = perf_evlist__new(NULL, NULL);
	if (session->evlist == NULL)
		return -ENOMEM;

	if (session->fd_pipe)
		return perf_header__read_pipe(session, fd);

	if (perf_file_header__read(&f_header, header, fd) < 0) {
		pr_debug("incompatible file format\n");
		return -EINVAL;
	}

	nr_attrs = f_header.attrs.size / sizeof(f_attr);
	lseek(fd, f_header.attrs.offset, SEEK_SET);

	for (i = 0; i < nr_attrs; i++) {
		struct perf_evsel *evsel;
		off_t tmp;

		if (readn(fd, &f_attr, sizeof(f_attr)) <= 0)
			goto out_errno;

		if (header->needs_swap)
			perf_event__attr_swap(&f_attr.attr);

		tmp = lseek(fd, 0, SEEK_CUR);
		evsel = perf_evsel__new(&f_attr.attr, i);

		if (evsel == NULL)
			goto out_delete_evlist;
		/*
		 * Do it before so that if perf_evsel__alloc_id fails, this
		 * entry gets purged too at perf_evlist__delete().
		 */
		perf_evlist__add(session->evlist, evsel);

		nr_ids = f_attr.ids.size / sizeof(u64);
		/*
		 * We don't have the cpu and thread maps on the header, so
		 * for allocating the perf_sample_id table we fake 1 cpu and
		 * hattr->ids threads.
		 */
		if (perf_evsel__alloc_id(evsel, 1, nr_ids))
			goto out_delete_evlist;

		lseek(fd, f_attr.ids.offset, SEEK_SET);

		for (j = 0; j < nr_ids; j++) {
			if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
				goto out_errno;

			perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);
		}

		lseek(fd, tmp, SEEK_SET);
	}

	if (f_header.event_types.size) {
		lseek(fd, f_header.event_types.offset, SEEK_SET);
		events = malloc(f_header.event_types.size);
		if (events == NULL)
			return -ENOMEM;
		if (perf_header__getbuffer64(header, fd, events,
					     f_header.event_types.size))
			goto out_errno;
		event_count = f_header.event_types.size / sizeof(struct perf_trace_event_type);
	}

	perf_header__process_sections(header, fd, NULL,
				      perf_file_section__process);

	lseek(fd, header->data_offset, SEEK_SET);

	header->frozen = 1;
	return 0;
out_errno:
	return -errno;

out_delete_evlist:
	perf_evlist__delete(session->evlist);
	session->evlist = NULL;
	return -ENOMEM;
}
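
/*
 * Pipe-mode counterpart of the attrs section: build a PERF_RECORD_HEADER_ATTR
 * event carrying the perf_event_attr immediately followed by its array of
 * 'ids' sample ids, and hand it to the supplied process() callback.  The attr
 * payload is padded to a u64 boundary so that the ids that follow stay
 * aligned.
 */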
int perf_event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id,
				perf_event__handler_t process,
				struct perf_session *session)
{
	union perf_event *ev;
	size_t size;
	int err;

	size = sizeof(struct perf_event_attr);
	size = ALIGN(size, sizeof(u64));
	size += sizeof(struct perf_event_header);
	size += ids * sizeof(u64);

	ev = malloc(size);

	if (ev == NULL)
		return -ENOMEM;

	ev->attr.attr = *attr;
	memcpy(ev->attr.id, id, ids * sizeof(u64));

	ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
	ev->attr.header.size = size;

	err = process(ev, NULL, session);

	free(ev);

	return err;
}

int perf_session__synthesize_attrs(struct perf_session *session,
				   perf_event__handler_t process)
{
	struct perf_evsel *attr;
	int err = 0;

	list_for_each_entry(attr, &session->evlist->entries, node) {
		err = perf_event__synthesize_attr(&attr->attr, attr->ids,
						  attr->id, process, session);
		if (err) {
			pr_debug("failed to create perf header attribute\n");
			return err;
		}
	}

	return err;
}
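
/*
 * Consumer side of PERF_RECORD_HEADER_ATTR: recreate an evsel from the attr
 * carried in the event and register its sample ids.  The number of ids is
 * whatever is left of header.size once the fixed part up to the id[] array
 * has been subtracted.
 */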
int perf_event__process_attr(union perf_event *event,
			     struct perf_session *session)
{
	unsigned int i, ids, n_ids;
	struct perf_evsel *evsel;

	if (session->evlist == NULL) {
		session->evlist = perf_evlist__new(NULL, NULL);
		if (session->evlist == NULL)
			return -ENOMEM;
	}

	evsel = perf_evsel__new(&event->attr.attr,
				session->evlist->nr_entries);
	if (evsel == NULL)
		return -ENOMEM;

	perf_evlist__add(session->evlist, evsel);

	ids = event->header.size;
	ids -= (void *)&event->attr.id - (void *)event;
	n_ids = ids / sizeof(u64);
	/*
	 * We don't have the cpu and thread maps on the header, so
	 * for allocating the perf_sample_id table we fake 1 cpu and
	 * hattr->ids threads.
	 */
	if (perf_evsel__alloc_id(evsel, 1, n_ids))
		return -ENOMEM;

	for (i = 0; i < n_ids; i++) {
		perf_evlist__id_add(session->evlist, evsel, 0, i,
				    event->attr.id[i]);
	}

	perf_session__update_sample_type(session);

	return 0;
}
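
/*
 * Emit one PERF_RECORD_HEADER_EVENT_TYPE event per stored trace event type,
 * mapping an event id to its name.  The name field is truncated to the
 * string's u64-aligned length so only the bytes actually used are written.
 */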
int perf_event__synthesize_event_type(u64 event_id, char *name,
				      perf_event__handler_t process,
				      struct perf_session *session)
{
	union perf_event ev;
	size_t size = 0;
	int err = 0;

	memset(&ev, 0, sizeof(ev));

	ev.event_type.event_type.event_id = event_id;
	memset(ev.event_type.event_type.name, 0, MAX_EVENT_NAME);
	strncpy(ev.event_type.event_type.name, name, MAX_EVENT_NAME - 1);

	ev.event_type.header.type = PERF_RECORD_HEADER_EVENT_TYPE;
	size = strlen(name);
	size = ALIGN(size, sizeof(u64));
	ev.event_type.header.size = sizeof(ev.event_type) -
		(sizeof(ev.event_type.event_type.name) - size);

	err = process(&ev, NULL, session);

	return err;
}

int perf_event__synthesize_event_types(perf_event__handler_t process,
				       struct perf_session *session)
{
	struct perf_trace_event_type *type;
	int i, err = 0;

	for (i = 0; i < event_count; i++) {
		type = &events[i];

		err = perf_event__synthesize_event_type(type->event_id,
							type->name, process,
							session);
		if (err) {
			pr_debug("failed to create perf header event type\n");
			return err;
		}
	}

	return err;
}

int perf_event__process_event_type(union perf_event *event,
				   struct perf_session *session __unused)
{
	if (perf_header__push_event(event->event_type.event_type.event_id,
				    event->event_type.event_type.name) < 0)
		return -ENOMEM;

	return 0;
}
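
/*
 * Emit a PERF_RECORD_HEADER_TRACING_DATA event announcing the (u64-aligned)
 * size of the tracing data, followed by the tracing data itself, when writing
 * to a pipe.  Returns the aligned number of tracing-data bytes the reader
 * should expect after the event.
 */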
int perf_event__synthesize_tracing_data(int fd, struct perf_evlist *evlist,
					perf_event__handler_t process,
					struct perf_session *session __unused)
{
	union perf_event ev;
	struct tracing_data *tdata;
	ssize_t size = 0, aligned_size = 0, padding;
	int err __used = 0;

	/*
	 * We are going to store the size of the data followed
	 * by the data contents. Since the fd is a pipe, we cannot
	 * seek back to store the size of the data once we know it.
	 * Instead we:
	 *
	 * - write the tracing data to the temp file
	 * - get/write the data size to the pipe
	 * - write the tracing data from the temp file
	 *   to the pipe
	 */
	tdata = tracing_data_get(&evlist->entries, fd, true);
	if (!tdata)
		return -1;

	memset(&ev, 0, sizeof(ev));

	ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
	size = tdata->size;
	aligned_size = ALIGN(size, sizeof(u64));
	padding = aligned_size - size;
	ev.tracing_data.header.size = sizeof(ev.tracing_data);
	ev.tracing_data.size = aligned_size;

	process(&ev, NULL, session);

	/*
	 * The put function will copy all the tracing data
	 * stored in the temp file to the pipe.
	 */
	tracing_data_put(tdata);

	write_padded(fd, NULL, 0, padding);

	return aligned_size;
}
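
/*
 * Consumer side of PERF_RECORD_HEADER_TRACING_DATA: hand the fd to
 * trace_report() to parse the tracing data in place, consume (and, when
 * repiping, forward) the alignment padding, and verify that what was read
 * matches the size announced in the event.
 */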
int perf_event__process_tracing_data(union perf_event *event,
				     struct perf_session *session)
{
	ssize_t size_read, padding, size = event->tracing_data.size;
	off_t offset = lseek(session->fd, 0, SEEK_CUR);
	char buf[BUFSIZ];

	/* setup for reading amidst mmap */
	lseek(session->fd, offset + sizeof(struct tracing_data_event),
	      SEEK_SET);

	size_read = trace_report(session->fd, session->repipe);

	padding = ALIGN(size_read, sizeof(u64)) - size_read;

	if (read(session->fd, buf, padding) < 0)
		die("reading input file");
	if (session->repipe) {
		int retw = write(STDOUT_FILENO, buf, padding);
		if (retw <= 0 || retw != padding)
			die("repiping tracing data padding");
	}

	if (size_read + padding != size)
		die("tracing data size mismatch");

	return size_read + padding;
}
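
/*
 * Emit a PERF_RECORD_HEADER_BUILD_ID event for a DSO that was actually hit
 * during the session: the build-id plus the DSO's long name, padded to
 * NAME_ALIGN, tagged with the machine's pid and the caller-supplied cpumode
 * in header.misc.
 */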
int perf_event__synthesize_build_id(struct dso *pos, u16 misc,
				    perf_event__handler_t process,
				    struct machine *machine,
				    struct perf_session *session)
{
	union perf_event ev;
	size_t len;
	int err = 0;

	if (!pos->hit)
		return err;

	memset(&ev, 0, sizeof(ev));

	len = pos->long_name_len + 1;
	len = ALIGN(len, NAME_ALIGN);
	memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
	ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
	ev.build_id.header.misc = misc;
	ev.build_id.pid = machine->pid;
	ev.build_id.header.size = sizeof(ev.build_id) + len;
	memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);

	err = process(&ev, NULL, session);

	return err;
}

int perf_event__process_build_id(union perf_event *event,
				 struct perf_session *session)
{
	__event_process_build_id(&event->build_id,
				 event->build_id.filename,
				 session);
	return 0;
}

void disable_buildid_cache(void)
{
	no_buildid_cache = true;
}