turbostat.c 59 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
7227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433
  1. /*
  2. * turbostat -- show CPU frequency and C-state residency
  3. * on modern Intel turbo-capable processors.
  4. *
  5. * Copyright (c) 2013 Intel Corporation.
  6. * Len Brown <len.brown@intel.com>
  7. *
  8. * This program is free software; you can redistribute it and/or modify it
  9. * under the terms and conditions of the GNU General Public License,
  10. * version 2, as published by the Free Software Foundation.
  11. *
  12. * This program is distributed in the hope it will be useful, but WITHOUT
  13. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  14. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  15. * more details.
  16. *
  17. * You should have received a copy of the GNU General Public License along with
  18. * this program; if not, write to the Free Software Foundation, Inc.,
  19. * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  20. */
  21. #define _GNU_SOURCE
  22. #include <asm/msr.h>
  23. #include <stdio.h>
  24. #include <unistd.h>
  25. #include <sys/types.h>
  26. #include <sys/wait.h>
  27. #include <sys/stat.h>
  28. #include <sys/resource.h>
  29. #include <fcntl.h>
  30. #include <signal.h>
  31. #include <sys/time.h>
  32. #include <stdlib.h>
  33. #include <dirent.h>
  34. #include <string.h>
  35. #include <ctype.h>
  36. #include <sched.h>
char *proc_stat = "/proc/stat";
unsigned int interval_sec = 5; /* set with -i interval_sec */
unsigned int verbose; /* set with -v */
unsigned int rapl_verbose; /* set with -R */
unsigned int thermal_verbose; /* set with -T */
unsigned int summary_only; /* set with -s */
unsigned int skip_c0; /* set when APERF/MPERF go backwards: %c0 unreliable */
unsigned int skip_c1; /* set when APERF/MPERF go backwards: %c1 unreliable */
unsigned int do_nhm_cstates; /* Nehalem-era core/pkg C-state counters present */
unsigned int do_snb_cstates; /* Sandy Bridge-era counters (c7, pc2, pc7) present */
unsigned int do_c8_c9_c10; /* pc8/pc9/pc10 residency counters present */
unsigned int do_slm_cstates; /* Silvermont: c3/pc3/pc6 columns are suppressed */
unsigned int use_c1_residency_msr; /* dedicated C1 residency MSR available */
unsigned int has_aperf; /* APERF/MPERF present: GHz column can be shown */
unsigned int has_epb;
unsigned int units = 1000000000; /* Ghz etc */
unsigned int genuine_intel;
unsigned int has_invariant_tsc;
unsigned int do_nehalem_platform_info;
unsigned int do_nehalem_turbo_ratio_limit;
unsigned int do_ivt_turbo_ratio_limit;
unsigned int extra_msr_offset32; /* user-requested 32-bit MSR to snapshot */
unsigned int extra_msr_offset64; /* user-requested 64-bit MSR to snapshot */
unsigned int extra_delta_offset32; /* user-requested 32-bit counter (shown as delta) */
unsigned int extra_delta_offset64; /* user-requested 64-bit counter (shown as delta) */
int do_smi; /* show SMI count column */
double bclk;
unsigned int show_pkg; /* print package-id column */
unsigned int show_core; /* print core-id column */
unsigned int show_cpu; /* print cpu-id column */
unsigned int show_pkg_only; /* print only 1st core in each package */
unsigned int show_core_only; /* print only 1st thread in each core */
char *output_buffer, *outp; /* all output is staged here; outp is the write cursor */
unsigned int do_rapl; /* bitmask of the RAPL_* capabilities below */
unsigned int do_dts; /* per-core digital thermal sensor (CTMP column) */
unsigned int do_ptm; /* package thermal sensor (PTMP column) */
unsigned int tcc_activation_temp;
unsigned int tcc_activation_temp_override;
double rapl_power_units, rapl_energy_units, rapl_time_units;
double rapl_joule_counter_range; /* interval (sec) beyond which RAPL counters may wrap */
#define RAPL_PKG (1 << 0)
#define RAPL_CORES (1 << 1)
#define RAPL_GFX (1 << 2)
#define RAPL_DRAM (1 << 3)
#define RAPL_PKG_PERF_STATUS (1 << 4)
#define RAPL_DRAM_PERF_STATUS (1 << 5)
#define RAPL_PKG_POWER_INFO (1 << 6)
#define RAPL_CORE_POLICY (1 << 7)
#define TJMAX_DEFAULT 100
/* NOTE: evaluates both arguments twice -- do not pass expressions with side effects */
#define MAX(a, b) ((a) > (b) ? (a) : (b))
int aperf_mperf_unstable; /* latched once APERF/MPERF go backwards; GHz shown with '*' */
int backwards_count;
char *progname;
cpu_set_t *cpu_present_set, *cpu_affinity_set;
size_t cpu_present_setsize, cpu_affinity_setsize;
/* Per-logical-CPU counter snapshot; two copies (even/odd) are kept for deltas. */
struct thread_data {
	unsigned long long tsc;
	unsigned long long aperf;
	unsigned long long mperf;
	unsigned long long c1; /* measured via MSR or derived in delta_thread() */
	unsigned long long extra_msr64; /* snapshot of user-requested 64-bit MSR */
	unsigned long long extra_delta64; /* user-requested 64-bit counter */
	unsigned long long extra_msr32; /* snapshot of user-requested 32-bit MSR */
	unsigned long long extra_delta32; /* user-requested 32-bit counter */
	unsigned int smi_count;
	unsigned int cpu_id;
	unsigned int flags; /* topology role bits below */
#define CPU_IS_FIRST_THREAD_IN_CORE 0x2
#define CPU_IS_FIRST_CORE_IN_PACKAGE 0x4
} *thread_even, *thread_odd;

/* Per-core counters, stored once per core (on its first thread's behalf). */
struct core_data {
	unsigned long long c3;
	unsigned long long c6;
	unsigned long long c7;
	unsigned int core_temp_c;
	unsigned int core_id;
} *core_even, *core_odd;

/* Per-package counters: pkg C-state residencies, RAPL energy, thermals. */
struct pkg_data {
	unsigned long long pc2;
	unsigned long long pc3;
	unsigned long long pc6;
	unsigned long long pc7;
	unsigned long long pc8;
	unsigned long long pc9;
	unsigned long long pc10;
	unsigned int package_id;
	unsigned int energy_pkg; /* MSR_PKG_ENERGY_STATUS */
	unsigned int energy_dram; /* MSR_DRAM_ENERGY_STATUS */
	unsigned int energy_cores; /* MSR_PP0_ENERGY_STATUS */
	unsigned int energy_gfx; /* MSR_PP1_ENERGY_STATUS */
	unsigned int rapl_pkg_perf_status; /* MSR_PKG_PERF_STATUS */
	unsigned int rapl_dram_perf_status; /* MSR_DRAM_PERF_STATUS */
	unsigned int pkg_temp_c;
} *package_even, *package_odd;

#define ODD_COUNTERS thread_odd, core_odd, package_odd
#define EVEN_COUNTERS thread_even, core_even, package_even

/* Index into the flat [pkg][core][thread] arrays allocated per snapshot. */
#define GET_THREAD(thread_base, thread_no, core_no, pkg_no) \
	(thread_base + (pkg_no) * topo.num_cores_per_pkg * \
		topo.num_threads_per_core + \
		(core_no) * topo.num_threads_per_core + (thread_no))
#define GET_CORE(core_base, core_no, pkg_no) \
	(core_base + (pkg_no) * topo.num_cores_per_pkg + (core_no))
#define GET_PKG(pkg_base, pkg_no) (pkg_base + pkg_no)

/* System-wide totals ("sum") and per-interval averages ("average"). */
struct system_summary {
	struct thread_data threads;
	struct core_data cores;
	struct pkg_data packages;
} sum, average;

/* Topology discovered at startup; sizes the counter arrays above. */
struct topo_params {
	int num_packages;
	int num_cpus;
	int num_cores;
	int max_cpu_num;
	int num_cores_per_pkg;
	int num_threads_per_core;
} topo;

struct timeval tv_even, tv_odd, tv_delta; /* timestamps bracketing each interval */

void setup_all_buffers(void);
  155. int cpu_is_not_present(int cpu)
  156. {
  157. return !CPU_ISSET_S(cpu, cpu_present_setsize, cpu_present_set);
  158. }
  159. /*
  160. * run func(thread, core, package) in topology order
  161. * skip non-present cpus
  162. */
  163. int for_all_cpus(int (func)(struct thread_data *, struct core_data *, struct pkg_data *),
  164. struct thread_data *thread_base, struct core_data *core_base, struct pkg_data *pkg_base)
  165. {
  166. int retval, pkg_no, core_no, thread_no;
  167. for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) {
  168. for (core_no = 0; core_no < topo.num_cores_per_pkg; ++core_no) {
  169. for (thread_no = 0; thread_no <
  170. topo.num_threads_per_core; ++thread_no) {
  171. struct thread_data *t;
  172. struct core_data *c;
  173. struct pkg_data *p;
  174. t = GET_THREAD(thread_base, thread_no, core_no, pkg_no);
  175. if (cpu_is_not_present(t->cpu_id))
  176. continue;
  177. c = GET_CORE(core_base, core_no, pkg_no);
  178. p = GET_PKG(pkg_base, pkg_no);
  179. retval = func(t, c, p);
  180. if (retval)
  181. return retval;
  182. }
  183. }
  184. }
  185. return 0;
  186. }
  187. int cpu_migrate(int cpu)
  188. {
  189. CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set);
  190. CPU_SET_S(cpu, cpu_affinity_setsize, cpu_affinity_set);
  191. if (sched_setaffinity(0, cpu_affinity_setsize, cpu_affinity_set) == -1)
  192. return -1;
  193. else
  194. return 0;
  195. }
  196. int get_msr(int cpu, off_t offset, unsigned long long *msr)
  197. {
  198. ssize_t retval;
  199. char pathname[32];
  200. int fd;
  201. sprintf(pathname, "/dev/cpu/%d/msr", cpu);
  202. fd = open(pathname, O_RDONLY);
  203. if (fd < 0)
  204. return -1;
  205. retval = pread(fd, msr, sizeof *msr, offset);
  206. close(fd);
  207. if (retval != sizeof *msr) {
  208. fprintf(stderr, "%s offset 0x%zx read failed\n", pathname, offset);
  209. return -1;
  210. }
  211. return 0;
  212. }
/*
 * Emit one header line into the output buffer, with a column for each
 * counter enabled at probe time.  Column order and widths must stay in
 * sync with format_counters() (see the formatting-convention comment
 * above that function).
 */
void print_header(void)
{
	/* topology columns */
	if (show_pkg)
		outp += sprintf(outp, "pk");
	if (show_pkg)
		outp += sprintf(outp, " ");
	if (show_core)
		outp += sprintf(outp, "cor");
	if (show_cpu)
		outp += sprintf(outp, " CPU");
	if (show_pkg || show_core || show_cpu)
		outp += sprintf(outp, " ");
	/* frequency / cycle columns */
	if (do_nhm_cstates)
		outp += sprintf(outp, " %%c0");
	if (has_aperf)
		outp += sprintf(outp, " GHz");
	outp += sprintf(outp, " TSC");
	if (do_smi)
		outp += sprintf(outp, " SMI");
	/* user-requested extra counters/MSRs, labeled by their offset */
	if (extra_delta_offset32)
		outp += sprintf(outp, " count 0x%03X", extra_delta_offset32);
	if (extra_delta_offset64)
		outp += sprintf(outp, " COUNT 0x%03X", extra_delta_offset64);
	if (extra_msr_offset32)
		outp += sprintf(outp, " MSR 0x%03X", extra_msr_offset32);
	if (extra_msr_offset64)
		outp += sprintf(outp, " MSR 0x%03X", extra_msr_offset64);
	/* core C-state residency columns (c3 absent on Silvermont) */
	if (do_nhm_cstates)
		outp += sprintf(outp, " %%c1");
	if (do_nhm_cstates && !do_slm_cstates)
		outp += sprintf(outp, " %%c3");
	if (do_nhm_cstates)
		outp += sprintf(outp, " %%c6");
	if (do_snb_cstates)
		outp += sprintf(outp, " %%c7");
	/* thermal columns */
	if (do_dts)
		outp += sprintf(outp, " CTMP");
	if (do_ptm)
		outp += sprintf(outp, " PTMP");
	/* package C-state residency columns */
	if (do_snb_cstates)
		outp += sprintf(outp, " %%pc2");
	if (do_nhm_cstates && !do_slm_cstates)
		outp += sprintf(outp, " %%pc3");
	if (do_nhm_cstates && !do_slm_cstates)
		outp += sprintf(outp, " %%pc6");
	if (do_snb_cstates)
		outp += sprintf(outp, " %%pc7");
	if (do_c8_c9_c10) {
		outp += sprintf(outp, " %%pc8");
		outp += sprintf(outp, " %%pc9");
		outp += sprintf(outp, " %%pc10");
	}
	/* RAPL power / throttle columns */
	if (do_rapl & RAPL_PKG)
		outp += sprintf(outp, " Pkg_W");
	if (do_rapl & RAPL_CORES)
		outp += sprintf(outp, " Cor_W");
	if (do_rapl & RAPL_GFX)
		outp += sprintf(outp, " GFX_W");
	if (do_rapl & RAPL_DRAM)
		outp += sprintf(outp, " RAM_W");
	if (do_rapl & RAPL_PKG_PERF_STATUS)
		outp += sprintf(outp, " PKG_%%");
	if (do_rapl & RAPL_DRAM_PERF_STATUS)
		outp += sprintf(outp, " RAM_%%");
	outp += sprintf(outp, "\n");
}
/*
 * Debug aid: dump the raw (undeltaed) counter snapshot for one
 * thread/core/package triple to stderr.  Any of t/c/p may be NULL,
 * in which case that section is skipped.  Always returns 0 so it can
 * be driven by for_all_cpus().
 */
int dump_counters(struct thread_data *t, struct core_data *c,
	struct pkg_data *p)
{
	fprintf(stderr, "t %p, c %p, p %p\n", t, c, p);
	if (t) {
		fprintf(stderr, "CPU: %d flags 0x%x\n", t->cpu_id, t->flags);
		fprintf(stderr, "TSC: %016llX\n", t->tsc);
		fprintf(stderr, "aperf: %016llX\n", t->aperf);
		fprintf(stderr, "mperf: %016llX\n", t->mperf);
		fprintf(stderr, "c1: %016llX\n", t->c1);
		fprintf(stderr, "msr0x%x: %08llX\n",
			extra_delta_offset32, t->extra_delta32);
		fprintf(stderr, "msr0x%x: %016llX\n",
			extra_delta_offset64, t->extra_delta64);
		fprintf(stderr, "msr0x%x: %08llX\n",
			extra_msr_offset32, t->extra_msr32);
		fprintf(stderr, "msr0x%x: %016llX\n",
			extra_msr_offset64, t->extra_msr64);
		if (do_smi)
			fprintf(stderr, "SMI: %08X\n", t->smi_count);
	}
	if (c) {
		fprintf(stderr, "core: %d\n", c->core_id);
		fprintf(stderr, "c3: %016llX\n", c->c3);
		fprintf(stderr, "c6: %016llX\n", c->c6);
		fprintf(stderr, "c7: %016llX\n", c->c7);
		fprintf(stderr, "DTS: %dC\n", c->core_temp_c);
	}
	if (p) {
		fprintf(stderr, "package: %d\n", p->package_id);
		fprintf(stderr, "pc2: %016llX\n", p->pc2);
		fprintf(stderr, "pc3: %016llX\n", p->pc3);
		fprintf(stderr, "pc6: %016llX\n", p->pc6);
		fprintf(stderr, "pc7: %016llX\n", p->pc7);
		fprintf(stderr, "pc8: %016llX\n", p->pc8);
		fprintf(stderr, "pc9: %016llX\n", p->pc9);
		fprintf(stderr, "pc10: %016llX\n", p->pc10);
		fprintf(stderr, "Joules PKG: %0X\n", p->energy_pkg);
		fprintf(stderr, "Joules COR: %0X\n", p->energy_cores);
		fprintf(stderr, "Joules GFX: %0X\n", p->energy_gfx);
		fprintf(stderr, "Joules RAM: %0X\n", p->energy_dram);
		fprintf(stderr, "Throttle PKG: %0X\n", p->rapl_pkg_perf_status);
		fprintf(stderr, "Throttle RAM: %0X\n", p->rapl_dram_perf_status);
		fprintf(stderr, "PTM: %dC\n", p->pkg_temp_c);
	}
	return 0;
}
/*
 * column formatting convention & formats
 * package: "pk" 2 columns %2d
 * core: "cor" 3 columns %3d
 * CPU: "CPU" 3 columns %3d
 * Pkg_W: %6.2
 * Cor_W: %6.2
 * GFX_W: %5.2
 * RAM_W: %5.2
 * GHz: "GHz" 3 columns %3.2
 * TSC: "TSC" 3 columns %3.2
 * SMI: "SMI" 4 columns %4d
 * percentage " %pc3" %6.2
 * Perf Status percentage: %5.2
 * "CTMP" 4 columns %4d
 */
/*
 * Format one row of already-deltaed counters into the output buffer.
 * Called once with &average.threads for the summary row, then (via
 * for_all_cpus) once per CPU.  Per-core and per-package sections are
 * emitted only on the first thread of a core/package respectively.
 * Always returns 0 so for_all_cpus() keeps iterating.
 */
int format_counters(struct thread_data *t, struct core_data *c,
	struct pkg_data *p)
{
	double interval_float;
	char *fmt5, *fmt6;
	/* if showing only 1st thread in core and this isn't one, bail out */
	if (show_core_only && !(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
		return 0;
	/* if showing only 1st thread in pkg and this isn't one, bail out */
	if (show_pkg_only && !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
		return 0;
	/* measurement interval in seconds, for rate calculations */
	interval_float = tv_delta.tv_sec + tv_delta.tv_usec/1000000.0;
	/* topo columns, print blanks on 1st (average) line */
	if (t == &average.threads) {
		if (show_pkg)
			outp += sprintf(outp, " ");
		if (show_pkg && show_core)
			outp += sprintf(outp, " ");
		if (show_core)
			outp += sprintf(outp, " ");
		if (show_cpu)
			outp += sprintf(outp, " " " ");
	} else {
		if (show_pkg) {
			if (p)
				outp += sprintf(outp, "%2d", p->package_id);
			else
				outp += sprintf(outp, " ");
		}
		if (show_pkg && show_core)
			outp += sprintf(outp, " ");
		if (show_core) {
			if (c)
				outp += sprintf(outp, "%3d", c->core_id);
			else
				outp += sprintf(outp, " ");
		}
		if (show_cpu)
			outp += sprintf(outp, " %3d", t->cpu_id);
	}
	/* %c0 */
	if (do_nhm_cstates) {
		if (show_pkg || show_core || show_cpu)
			outp += sprintf(outp, " ");
		if (!skip_c0)
			outp += sprintf(outp, "%6.2f", 100.0 * t->mperf/t->tsc);
		else
			outp += sprintf(outp, " ****");
	}
	/* GHz */
	if (has_aperf) {
		if (!aperf_mperf_unstable) {
			outp += sprintf(outp, " %3.2f",
				1.0 * t->tsc / units * t->aperf /
				t->mperf / interval_float);
		} else {
			/* counters went backwards: print '*' markers */
			if (t->aperf > t->tsc || t->mperf > t->tsc) {
				outp += sprintf(outp, " ***");
			} else {
				outp += sprintf(outp, "%3.1f*",
					1.0 * t->tsc /
					units * t->aperf /
					t->mperf / interval_float);
			}
		}
	}
	/* TSC */
	outp += sprintf(outp, "%5.2f", 1.0 * t->tsc/units/interval_float);
	/* SMI */
	if (do_smi)
		outp += sprintf(outp, "%4d", t->smi_count);
	/* delta */
	if (extra_delta_offset32)
		outp += sprintf(outp, " %11llu", t->extra_delta32);
	/* DELTA */
	if (extra_delta_offset64)
		outp += sprintf(outp, " %11llu", t->extra_delta64);
	/* msr */
	if (extra_msr_offset32)
		outp += sprintf(outp, " 0x%08llx", t->extra_msr32);
	/* MSR */
	if (extra_msr_offset64)
		outp += sprintf(outp, " 0x%016llx", t->extra_msr64);
	if (do_nhm_cstates) {
		if (!skip_c1)
			outp += sprintf(outp, " %6.2f", 100.0 * t->c1/t->tsc);
		else
			outp += sprintf(outp, " ****");
	}
	/* print per-core data only for 1st thread in core */
	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
		goto done;
	if (do_nhm_cstates && !do_slm_cstates)
		outp += sprintf(outp, " %6.2f", 100.0 * c->c3/t->tsc);
	if (do_nhm_cstates)
		outp += sprintf(outp, " %6.2f", 100.0 * c->c6/t->tsc);
	if (do_snb_cstates)
		outp += sprintf(outp, " %6.2f", 100.0 * c->c7/t->tsc);
	if (do_dts)
		outp += sprintf(outp, " %4d", c->core_temp_c);
	/* print per-package data only for 1st core in package */
	if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
		goto done;
	if (do_ptm)
		outp += sprintf(outp, " %4d", p->pkg_temp_c);
	if (do_snb_cstates)
		outp += sprintf(outp, " %6.2f", 100.0 * p->pc2/t->tsc);
	if (do_nhm_cstates && !do_slm_cstates)
		outp += sprintf(outp, " %6.2f", 100.0 * p->pc3/t->tsc);
	if (do_nhm_cstates && !do_slm_cstates)
		outp += sprintf(outp, " %6.2f", 100.0 * p->pc6/t->tsc);
	if (do_snb_cstates)
		outp += sprintf(outp, " %6.2f", 100.0 * p->pc7/t->tsc);
	if (do_c8_c9_c10) {
		outp += sprintf(outp, " %6.2f", 100.0 * p->pc8/t->tsc);
		outp += sprintf(outp, " %6.2f", 100.0 * p->pc9/t->tsc);
		outp += sprintf(outp, " %6.2f", 100.0 * p->pc10/t->tsc);
	}
	/*
	 * If measurement interval exceeds minimum RAPL Joule Counter range,
	 * indicate that results are suspect by printing "**" in fraction place.
	 */
	if (interval_float < rapl_joule_counter_range) {
		fmt5 = " %5.2f";
		fmt6 = " %6.2f";
	} else {
		fmt5 = " %3.0f**";
		fmt6 = " %4.0f**";
	}
	if (do_rapl & RAPL_PKG)
		outp += sprintf(outp, fmt6, p->energy_pkg * rapl_energy_units / interval_float);
	if (do_rapl & RAPL_CORES)
		outp += sprintf(outp, fmt6, p->energy_cores * rapl_energy_units / interval_float);
	if (do_rapl & RAPL_GFX)
		outp += sprintf(outp, fmt5, p->energy_gfx * rapl_energy_units / interval_float);
	if (do_rapl & RAPL_DRAM)
		outp += sprintf(outp, fmt5, p->energy_dram * rapl_energy_units / interval_float);
	if (do_rapl & RAPL_PKG_PERF_STATUS )
		outp += sprintf(outp, fmt5, 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float);
	if (do_rapl & RAPL_DRAM_PERF_STATUS )
		outp += sprintf(outp, fmt5, 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float);
done:
	outp += sprintf(outp, "\n");
	return 0;
}
  487. void flush_stdout()
  488. {
  489. fputs(output_buffer, stdout);
  490. fflush(stdout);
  491. outp = output_buffer;
  492. }
  493. void flush_stderr()
  494. {
  495. fputs(output_buffer, stderr);
  496. outp = output_buffer;
  497. }
  498. void format_all_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
  499. {
  500. static int printed;
  501. if (!printed || !summary_only)
  502. print_header();
  503. if (topo.num_cpus > 1)
  504. format_counters(&average.threads, &average.cores,
  505. &average.packages);
  506. printed = 1;
  507. if (summary_only)
  508. return;
  509. for_all_cpus(format_counters, t, c, p);
  510. }
  511. #define DELTA_WRAP32(new, old) \
  512. if (new > old) { \
  513. old = new - old; \
  514. } else { \
  515. old = 0x100000000 + new - old; \
  516. }
/*
 * old = new - old for all per-package counters.
 * C-state residencies are 64-bit and simply subtracted; the RAPL
 * energy/perf-status MSRs are 32-bit, so DELTA_WRAP32 handles one
 * wraparound.  Temperature is a point sample, not a counter.
 */
void
delta_package(struct pkg_data *new, struct pkg_data *old)
{
	old->pc2 = new->pc2 - old->pc2;
	old->pc3 = new->pc3 - old->pc3;
	old->pc6 = new->pc6 - old->pc6;
	old->pc7 = new->pc7 - old->pc7;
	old->pc8 = new->pc8 - old->pc8;
	old->pc9 = new->pc9 - old->pc9;
	old->pc10 = new->pc10 - old->pc10;
	old->pkg_temp_c = new->pkg_temp_c;	/* latest sample, not a delta */
	DELTA_WRAP32(new->energy_pkg, old->energy_pkg);
	DELTA_WRAP32(new->energy_cores, old->energy_cores);
	DELTA_WRAP32(new->energy_gfx, old->energy_gfx);
	DELTA_WRAP32(new->energy_dram, old->energy_dram);
	DELTA_WRAP32(new->rapl_pkg_perf_status, old->rapl_pkg_perf_status);
	DELTA_WRAP32(new->rapl_dram_perf_status, old->rapl_dram_perf_status);
}
/*
 * old = new - old for the per-core C-state residency counters.
 * Temperature is a point sample and is copied, not differenced.
 */
void
delta_core(struct core_data *new, struct core_data *old)
{
	old->c3 = new->c3 - old->c3;
	old->c6 = new->c6 - old->c6;
	old->c7 = new->c7 - old->c7;
	old->core_temp_c = new->core_temp_c;	/* latest sample, not a delta */
}
/*
 * old = new - old
 *
 * Turn the pair of per-thread snapshots into an interval delta,
 * stored back into *old.  @core_delta must already hold the core's
 * c3/c6/c7 deltas (computed by delta_core), as they are consumed
 * here to derive C1 residency when no dedicated C1 MSR exists.
 * Exits the program if the TSC delta is implausibly small.
 */
void
delta_thread(struct thread_data *new, struct thread_data *old,
	struct core_data *core_delta)
{
	old->tsc = new->tsc - old->tsc;
	/* check for TSC < 1 Mcycles over interval */
	if (old->tsc < (1000 * 1000)) {
		fprintf(stderr, "Insanely slow TSC rate, TSC stops in idle?\n");
		fprintf(stderr, "You can disable all c-states by booting with \"idle=poll\"\n");
		fprintf(stderr, "or just the deep ones with \"processor.max_cstate=1\"\n");
		exit(-3);
	}
	old->c1 = new->c1 - old->c1;
	if ((new->aperf > old->aperf) && (new->mperf > old->mperf)) {
		old->aperf = new->aperf - old->aperf;
		old->mperf = new->mperf - old->mperf;
	} else {
		/* non-monotonic APERF/MPERF: warn once, mark results suspect */
		if (!aperf_mperf_unstable) {
			fprintf(stderr, "%s: APERF or MPERF went backwards *\n", progname);
			fprintf(stderr, "* Frequency results do not cover entire interval *\n");
			fprintf(stderr, "* fix this by running Linux-2.6.30 or later *\n");
			aperf_mperf_unstable = 1;
		}
		/*
		 * mperf delta is likely a huge "positive" number
		 * can not use it for calculating c0 time
		 */
		skip_c0 = 1;
		skip_c1 = 1;
	}
	if (use_c1_residency_msr) {
		/*
		 * Some models have a dedicated C1 residency MSR,
		 * which should be more accurate than the derivation below.
		 */
	} else {
		/*
		 * As counter collection is not atomic,
		 * it is possible for mperf's non-halted cycles + idle states
		 * to exceed TSC's all cycles: show c1 = 0% in that case.
		 */
		if ((old->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > old->tsc)
			old->c1 = 0;
		else {
			/* normal case, derive c1 */
			old->c1 = old->tsc - old->mperf - core_delta->c3
				- core_delta->c6 - core_delta->c7;
		}
	}
	if (old->mperf == 0) {
		if (verbose > 1) fprintf(stderr, "cpu%d MPERF 0!\n", old->cpu_id);
		old->mperf = 1; /* divide by 0 protection */
	}
	old->extra_delta32 = new->extra_delta32 - old->extra_delta32;
	old->extra_delta32 &= 0xFFFFFFFF;	/* counter is only 32 bits wide */
	old->extra_delta64 = new->extra_delta64 - old->extra_delta64;
	/*
	 * Extra MSR is just a snapshot, simply copy latest w/o subtracting
	 */
	old->extra_msr32 = new->extra_msr32;
	old->extra_msr64 = new->extra_msr64;
	if (do_smi)
		old->smi_count = new->smi_count - old->smi_count;
}
/*
 * Compute all interval deltas for one CPU.  Order matters: the core
 * delta must be produced first, because delta_thread() consumes it
 * (via c2) when deriving C1 residency.  Always returns 0 so it can
 * be driven by for_all_cpus_2-style iteration.
 */
int delta_cpu(struct thread_data *t, struct core_data *c,
	struct pkg_data *p, struct thread_data *t2,
	struct core_data *c2, struct pkg_data *p2)
{
	/* calculate core delta only for 1st thread in core */
	if (t->flags & CPU_IS_FIRST_THREAD_IN_CORE)
		delta_core(c, c2);
	/* always calculate thread delta */
	delta_thread(t, t2, c2); /* c2 is core delta */
	/* calculate package delta only for 1st core in package */
	if (t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)
		delta_package(p, p2);
	return 0;
}
  624. void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
  625. {
  626. t->tsc = 0;
  627. t->aperf = 0;
  628. t->mperf = 0;
  629. t->c1 = 0;
  630. t->smi_count = 0;
  631. t->extra_delta32 = 0;
  632. t->extra_delta64 = 0;
  633. /* tells format_counters to dump all fields from this set */
  634. t->flags = CPU_IS_FIRST_THREAD_IN_CORE | CPU_IS_FIRST_CORE_IN_PACKAGE;
  635. c->c3 = 0;
  636. c->c6 = 0;
  637. c->c7 = 0;
  638. c->core_temp_c = 0;
  639. p->pc2 = 0;
  640. p->pc3 = 0;
  641. p->pc6 = 0;
  642. p->pc7 = 0;
  643. p->pc8 = 0;
  644. p->pc9 = 0;
  645. p->pc10 = 0;
  646. p->energy_pkg = 0;
  647. p->energy_dram = 0;
  648. p->energy_cores = 0;
  649. p->energy_gfx = 0;
  650. p->rapl_pkg_perf_status = 0;
  651. p->rapl_dram_perf_status = 0;
  652. p->pkg_temp_c = 0;
  653. }
  654. int sum_counters(struct thread_data *t, struct core_data *c,
  655. struct pkg_data *p)
  656. {
  657. average.threads.tsc += t->tsc;
  658. average.threads.aperf += t->aperf;
  659. average.threads.mperf += t->mperf;
  660. average.threads.c1 += t->c1;
  661. average.threads.extra_delta32 += t->extra_delta32;
  662. average.threads.extra_delta64 += t->extra_delta64;
  663. /* sum per-core values only for 1st thread in core */
  664. if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
  665. return 0;
  666. average.cores.c3 += c->c3;
  667. average.cores.c6 += c->c6;
  668. average.cores.c7 += c->c7;
  669. average.cores.core_temp_c = MAX(average.cores.core_temp_c, c->core_temp_c);
  670. /* sum per-pkg values only for 1st core in pkg */
  671. if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
  672. return 0;
  673. average.packages.pc2 += p->pc2;
  674. average.packages.pc3 += p->pc3;
  675. average.packages.pc6 += p->pc6;
  676. average.packages.pc7 += p->pc7;
  677. average.packages.pc8 += p->pc8;
  678. average.packages.pc9 += p->pc9;
  679. average.packages.pc10 += p->pc10;
  680. average.packages.energy_pkg += p->energy_pkg;
  681. average.packages.energy_dram += p->energy_dram;
  682. average.packages.energy_cores += p->energy_cores;
  683. average.packages.energy_gfx += p->energy_gfx;
  684. average.packages.pkg_temp_c = MAX(average.packages.pkg_temp_c, p->pkg_temp_c);
  685. average.packages.rapl_pkg_perf_status += p->rapl_pkg_perf_status;
  686. average.packages.rapl_dram_perf_status += p->rapl_dram_perf_status;
  687. return 0;
  688. }
  689. /*
  690. * sum the counters for all cpus in the system
  691. * compute the weighted average
  692. */
  693. void compute_average(struct thread_data *t, struct core_data *c,
  694. struct pkg_data *p)
  695. {
  696. clear_counters(&average.threads, &average.cores, &average.packages);
  697. for_all_cpus(sum_counters, t, c, p);
  698. average.threads.tsc /= topo.num_cpus;
  699. average.threads.aperf /= topo.num_cpus;
  700. average.threads.mperf /= topo.num_cpus;
  701. average.threads.c1 /= topo.num_cpus;
  702. average.threads.extra_delta32 /= topo.num_cpus;
  703. average.threads.extra_delta32 &= 0xFFFFFFFF;
  704. average.threads.extra_delta64 /= topo.num_cpus;
  705. average.cores.c3 /= topo.num_cores;
  706. average.cores.c6 /= topo.num_cores;
  707. average.cores.c7 /= topo.num_cores;
  708. average.packages.pc2 /= topo.num_packages;
  709. average.packages.pc3 /= topo.num_packages;
  710. average.packages.pc6 /= topo.num_packages;
  711. average.packages.pc7 /= topo.num_packages;
  712. average.packages.pc8 /= topo.num_packages;
  713. average.packages.pc9 /= topo.num_packages;
  714. average.packages.pc10 /= topo.num_packages;
  715. }
  716. static unsigned long long rdtsc(void)
  717. {
  718. unsigned int low, high;
  719. asm volatile("rdtsc" : "=a" (low), "=d" (high));
  720. return low | ((unsigned long long)high) << 32;
  721. }
  722. /*
  723. * get_counters(...)
  724. * migrate to cpu
  725. * acquire and record local counters for that cpu
  726. */
  727. int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
  728. {
  729. int cpu = t->cpu_id;
  730. unsigned long long msr;
  731. if (cpu_migrate(cpu)) {
  732. fprintf(stderr, "Could not migrate to CPU %d\n", cpu);
  733. return -1;
  734. }
  735. t->tsc = rdtsc(); /* we are running on local CPU of interest */
  736. if (has_aperf) {
  737. if (get_msr(cpu, MSR_IA32_APERF, &t->aperf))
  738. return -3;
  739. if (get_msr(cpu, MSR_IA32_MPERF, &t->mperf))
  740. return -4;
  741. }
  742. if (do_smi) {
  743. if (get_msr(cpu, MSR_SMI_COUNT, &msr))
  744. return -5;
  745. t->smi_count = msr & 0xFFFFFFFF;
  746. }
  747. if (extra_delta_offset32) {
  748. if (get_msr(cpu, extra_delta_offset32, &msr))
  749. return -5;
  750. t->extra_delta32 = msr & 0xFFFFFFFF;
  751. }
  752. if (extra_delta_offset64)
  753. if (get_msr(cpu, extra_delta_offset64, &t->extra_delta64))
  754. return -5;
  755. if (extra_msr_offset32) {
  756. if (get_msr(cpu, extra_msr_offset32, &msr))
  757. return -5;
  758. t->extra_msr32 = msr & 0xFFFFFFFF;
  759. }
  760. if (extra_msr_offset64)
  761. if (get_msr(cpu, extra_msr_offset64, &t->extra_msr64))
  762. return -5;
  763. if (use_c1_residency_msr) {
  764. if (get_msr(cpu, MSR_CORE_C1_RES, &t->c1))
  765. return -6;
  766. }
  767. /* collect core counters only for 1st thread in core */
  768. if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
  769. return 0;
  770. if (do_nhm_cstates && !do_slm_cstates) {
  771. if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3))
  772. return -6;
  773. }
  774. if (do_nhm_cstates) {
  775. if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6))
  776. return -7;
  777. }
  778. if (do_snb_cstates)
  779. if (get_msr(cpu, MSR_CORE_C7_RESIDENCY, &c->c7))
  780. return -8;
  781. if (do_dts) {
  782. if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr))
  783. return -9;
  784. c->core_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F);
  785. }
  786. /* collect package counters only for 1st core in package */
  787. if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
  788. return 0;
  789. if (do_nhm_cstates && !do_slm_cstates) {
  790. if (get_msr(cpu, MSR_PKG_C3_RESIDENCY, &p->pc3))
  791. return -9;
  792. if (get_msr(cpu, MSR_PKG_C6_RESIDENCY, &p->pc6))
  793. return -10;
  794. }
  795. if (do_snb_cstates) {
  796. if (get_msr(cpu, MSR_PKG_C2_RESIDENCY, &p->pc2))
  797. return -11;
  798. if (get_msr(cpu, MSR_PKG_C7_RESIDENCY, &p->pc7))
  799. return -12;
  800. }
  801. if (do_c8_c9_c10) {
  802. if (get_msr(cpu, MSR_PKG_C8_RESIDENCY, &p->pc8))
  803. return -13;
  804. if (get_msr(cpu, MSR_PKG_C9_RESIDENCY, &p->pc9))
  805. return -13;
  806. if (get_msr(cpu, MSR_PKG_C10_RESIDENCY, &p->pc10))
  807. return -13;
  808. }
  809. if (do_rapl & RAPL_PKG) {
  810. if (get_msr(cpu, MSR_PKG_ENERGY_STATUS, &msr))
  811. return -13;
  812. p->energy_pkg = msr & 0xFFFFFFFF;
  813. }
  814. if (do_rapl & RAPL_CORES) {
  815. if (get_msr(cpu, MSR_PP0_ENERGY_STATUS, &msr))
  816. return -14;
  817. p->energy_cores = msr & 0xFFFFFFFF;
  818. }
  819. if (do_rapl & RAPL_DRAM) {
  820. if (get_msr(cpu, MSR_DRAM_ENERGY_STATUS, &msr))
  821. return -15;
  822. p->energy_dram = msr & 0xFFFFFFFF;
  823. }
  824. if (do_rapl & RAPL_GFX) {
  825. if (get_msr(cpu, MSR_PP1_ENERGY_STATUS, &msr))
  826. return -16;
  827. p->energy_gfx = msr & 0xFFFFFFFF;
  828. }
  829. if (do_rapl & RAPL_PKG_PERF_STATUS) {
  830. if (get_msr(cpu, MSR_PKG_PERF_STATUS, &msr))
  831. return -16;
  832. p->rapl_pkg_perf_status = msr & 0xFFFFFFFF;
  833. }
  834. if (do_rapl & RAPL_DRAM_PERF_STATUS) {
  835. if (get_msr(cpu, MSR_DRAM_PERF_STATUS, &msr))
  836. return -16;
  837. p->rapl_dram_perf_status = msr & 0xFFFFFFFF;
  838. }
  839. if (do_ptm) {
  840. if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr))
  841. return -17;
  842. p->pkg_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F);
  843. }
  844. return 0;
  845. }
  846. void print_verbose_header(void)
  847. {
  848. unsigned long long msr;
  849. unsigned int ratio;
  850. if (!do_nehalem_platform_info)
  851. return;
  852. get_msr(0, MSR_NHM_PLATFORM_INFO, &msr);
  853. fprintf(stderr, "cpu0: MSR_NHM_PLATFORM_INFO: 0x%08llx\n", msr);
  854. ratio = (msr >> 40) & 0xFF;
  855. fprintf(stderr, "%d * %.0f = %.0f MHz max efficiency\n",
  856. ratio, bclk, ratio * bclk);
  857. ratio = (msr >> 8) & 0xFF;
  858. fprintf(stderr, "%d * %.0f = %.0f MHz TSC frequency\n",
  859. ratio, bclk, ratio * bclk);
  860. get_msr(0, MSR_IA32_POWER_CTL, &msr);
  861. fprintf(stderr, "cpu0: MSR_IA32_POWER_CTL: 0x%08llx (C1E auto-promotion: %sabled)\n",
  862. msr, msr & 0x2 ? "EN" : "DIS");
  863. if (!do_ivt_turbo_ratio_limit)
  864. goto print_nhm_turbo_ratio_limits;
  865. get_msr(0, MSR_IVT_TURBO_RATIO_LIMIT, &msr);
  866. fprintf(stderr, "cpu0: MSR_IVT_TURBO_RATIO_LIMIT: 0x%08llx\n", msr);
  867. ratio = (msr >> 56) & 0xFF;
  868. if (ratio)
  869. fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 16 active cores\n",
  870. ratio, bclk, ratio * bclk);
  871. ratio = (msr >> 48) & 0xFF;
  872. if (ratio)
  873. fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 15 active cores\n",
  874. ratio, bclk, ratio * bclk);
  875. ratio = (msr >> 40) & 0xFF;
  876. if (ratio)
  877. fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 14 active cores\n",
  878. ratio, bclk, ratio * bclk);
  879. ratio = (msr >> 32) & 0xFF;
  880. if (ratio)
  881. fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 13 active cores\n",
  882. ratio, bclk, ratio * bclk);
  883. ratio = (msr >> 24) & 0xFF;
  884. if (ratio)
  885. fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 12 active cores\n",
  886. ratio, bclk, ratio * bclk);
  887. ratio = (msr >> 16) & 0xFF;
  888. if (ratio)
  889. fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 11 active cores\n",
  890. ratio, bclk, ratio * bclk);
  891. ratio = (msr >> 8) & 0xFF;
  892. if (ratio)
  893. fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 10 active cores\n",
  894. ratio, bclk, ratio * bclk);
  895. ratio = (msr >> 0) & 0xFF;
  896. if (ratio)
  897. fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 9 active cores\n",
  898. ratio, bclk, ratio * bclk);
  899. print_nhm_turbo_ratio_limits:
  900. get_msr(0, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr);
  901. #define SNB_C1_AUTO_UNDEMOTE (1UL << 27)
  902. #define SNB_C3_AUTO_UNDEMOTE (1UL << 28)
  903. fprintf(stderr, "cpu0: MSR_NHM_SNB_PKG_CST_CFG_CTL: 0x%08llx", msr);
  904. fprintf(stderr, " (%s%s%s%s%slocked: pkg-cstate-limit=%d: ",
  905. (msr & SNB_C3_AUTO_UNDEMOTE) ? "UNdemote-C3, " : "",
  906. (msr & SNB_C1_AUTO_UNDEMOTE) ? "UNdemote-C1, " : "",
  907. (msr & NHM_C3_AUTO_DEMOTE) ? "demote-C3, " : "",
  908. (msr & NHM_C1_AUTO_DEMOTE) ? "demote-C1, " : "",
  909. (msr & (1 << 15)) ? "" : "UN",
  910. (unsigned int)msr & 7);
  911. switch(msr & 0x7) {
  912. case 0:
  913. fprintf(stderr, do_slm_cstates ? "no pkg states" : "pc0");
  914. break;
  915. case 1:
  916. fprintf(stderr, do_slm_cstates ? "no pkg states" : do_snb_cstates ? "pc2" : "pc0");
  917. break;
  918. case 2:
  919. fprintf(stderr, do_slm_cstates ? "invalid" : do_snb_cstates ? "pc6-noret" : "pc3");
  920. break;
  921. case 3:
  922. fprintf(stderr, do_slm_cstates ? "invalid" : "pc6");
  923. break;
  924. case 4:
  925. fprintf(stderr, do_slm_cstates ? "pc4" : "pc7");
  926. break;
  927. case 5:
  928. fprintf(stderr, do_slm_cstates ? "invalid" : do_snb_cstates ? "pc7s" : "invalid");
  929. break;
  930. case 6:
  931. fprintf(stderr, do_slm_cstates ? "pc6" : "invalid");
  932. break;
  933. case 7:
  934. fprintf(stderr, do_slm_cstates ? "pc7" : "unlimited");
  935. break;
  936. default:
  937. fprintf(stderr, "invalid");
  938. }
  939. fprintf(stderr, ")\n");
  940. if (!do_nehalem_turbo_ratio_limit)
  941. return;
  942. get_msr(0, MSR_NHM_TURBO_RATIO_LIMIT, &msr);
  943. fprintf(stderr, "cpu0: MSR_NHM_TURBO_RATIO_LIMIT: 0x%08llx\n", msr);
  944. ratio = (msr >> 56) & 0xFF;
  945. if (ratio)
  946. fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 8 active cores\n",
  947. ratio, bclk, ratio * bclk);
  948. ratio = (msr >> 48) & 0xFF;
  949. if (ratio)
  950. fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 7 active cores\n",
  951. ratio, bclk, ratio * bclk);
  952. ratio = (msr >> 40) & 0xFF;
  953. if (ratio)
  954. fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 6 active cores\n",
  955. ratio, bclk, ratio * bclk);
  956. ratio = (msr >> 32) & 0xFF;
  957. if (ratio)
  958. fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 5 active cores\n",
  959. ratio, bclk, ratio * bclk);
  960. ratio = (msr >> 24) & 0xFF;
  961. if (ratio)
  962. fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 4 active cores\n",
  963. ratio, bclk, ratio * bclk);
  964. ratio = (msr >> 16) & 0xFF;
  965. if (ratio)
  966. fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 3 active cores\n",
  967. ratio, bclk, ratio * bclk);
  968. ratio = (msr >> 8) & 0xFF;
  969. if (ratio)
  970. fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 2 active cores\n",
  971. ratio, bclk, ratio * bclk);
  972. ratio = (msr >> 0) & 0xFF;
  973. if (ratio)
  974. fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 1 active cores\n",
  975. ratio, bclk, ratio * bclk);
  976. }
  977. void free_all_buffers(void)
  978. {
  979. CPU_FREE(cpu_present_set);
  980. cpu_present_set = NULL;
  981. cpu_present_set = 0;
  982. CPU_FREE(cpu_affinity_set);
  983. cpu_affinity_set = NULL;
  984. cpu_affinity_setsize = 0;
  985. free(thread_even);
  986. free(core_even);
  987. free(package_even);
  988. thread_even = NULL;
  989. core_even = NULL;
  990. package_even = NULL;
  991. free(thread_odd);
  992. free(core_odd);
  993. free(package_odd);
  994. thread_odd = NULL;
  995. core_odd = NULL;
  996. package_odd = NULL;
  997. free(output_buffer);
  998. output_buffer = NULL;
  999. outp = NULL;
  1000. }
  1001. /*
  1002. * cpu_is_first_sibling_in_core(cpu)
  1003. * return 1 if given CPU is 1st HT sibling in the core
  1004. */
  1005. int cpu_is_first_sibling_in_core(int cpu)
  1006. {
  1007. char path[64];
  1008. FILE *filep;
  1009. int first_cpu;
  1010. sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu);
  1011. filep = fopen(path, "r");
  1012. if (filep == NULL) {
  1013. perror(path);
  1014. exit(1);
  1015. }
  1016. fscanf(filep, "%d", &first_cpu);
  1017. fclose(filep);
  1018. return (cpu == first_cpu);
  1019. }
  1020. /*
  1021. * cpu_is_first_core_in_package(cpu)
  1022. * return 1 if given CPU is 1st core in package
  1023. */
  1024. int cpu_is_first_core_in_package(int cpu)
  1025. {
  1026. char path[64];
  1027. FILE *filep;
  1028. int first_cpu;
  1029. sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/core_siblings_list", cpu);
  1030. filep = fopen(path, "r");
  1031. if (filep == NULL) {
  1032. perror(path);
  1033. exit(1);
  1034. }
  1035. fscanf(filep, "%d", &first_cpu);
  1036. fclose(filep);
  1037. return (cpu == first_cpu);
  1038. }
  1039. int get_physical_package_id(int cpu)
  1040. {
  1041. char path[80];
  1042. FILE *filep;
  1043. int pkg;
  1044. sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu);
  1045. filep = fopen(path, "r");
  1046. if (filep == NULL) {
  1047. perror(path);
  1048. exit(1);
  1049. }
  1050. fscanf(filep, "%d", &pkg);
  1051. fclose(filep);
  1052. return pkg;
  1053. }
  1054. int get_core_id(int cpu)
  1055. {
  1056. char path[80];
  1057. FILE *filep;
  1058. int core;
  1059. sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/core_id", cpu);
  1060. filep = fopen(path, "r");
  1061. if (filep == NULL) {
  1062. perror(path);
  1063. exit(1);
  1064. }
  1065. fscanf(filep, "%d", &core);
  1066. fclose(filep);
  1067. return core;
  1068. }
  1069. int get_num_ht_siblings(int cpu)
  1070. {
  1071. char path[80];
  1072. FILE *filep;
  1073. int sib1, sib2;
  1074. int matches;
  1075. char character;
  1076. sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu);
  1077. filep = fopen(path, "r");
  1078. if (filep == NULL) {
  1079. perror(path);
  1080. exit(1);
  1081. }
  1082. /*
  1083. * file format:
  1084. * if a pair of number with a character between: 2 siblings (eg. 1-2, or 1,4)
  1085. * otherwinse 1 sibling (self).
  1086. */
  1087. matches = fscanf(filep, "%d%c%d\n", &sib1, &character, &sib2);
  1088. fclose(filep);
  1089. if (matches == 3)
  1090. return 2;
  1091. else
  1092. return 1;
  1093. }
  1094. /*
  1095. * run func(thread, core, package) in topology order
  1096. * skip non-present cpus
  1097. */
  1098. int for_all_cpus_2(int (func)(struct thread_data *, struct core_data *,
  1099. struct pkg_data *, struct thread_data *, struct core_data *,
  1100. struct pkg_data *), struct thread_data *thread_base,
  1101. struct core_data *core_base, struct pkg_data *pkg_base,
  1102. struct thread_data *thread_base2, struct core_data *core_base2,
  1103. struct pkg_data *pkg_base2)
  1104. {
  1105. int retval, pkg_no, core_no, thread_no;
  1106. for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) {
  1107. for (core_no = 0; core_no < topo.num_cores_per_pkg; ++core_no) {
  1108. for (thread_no = 0; thread_no <
  1109. topo.num_threads_per_core; ++thread_no) {
  1110. struct thread_data *t, *t2;
  1111. struct core_data *c, *c2;
  1112. struct pkg_data *p, *p2;
  1113. t = GET_THREAD(thread_base, thread_no, core_no, pkg_no);
  1114. if (cpu_is_not_present(t->cpu_id))
  1115. continue;
  1116. t2 = GET_THREAD(thread_base2, thread_no, core_no, pkg_no);
  1117. c = GET_CORE(core_base, core_no, pkg_no);
  1118. c2 = GET_CORE(core_base2, core_no, pkg_no);
  1119. p = GET_PKG(pkg_base, pkg_no);
  1120. p2 = GET_PKG(pkg_base2, pkg_no);
  1121. retval = func(t, c, p, t2, c2, p2);
  1122. if (retval)
  1123. return retval;
  1124. }
  1125. }
  1126. }
  1127. return 0;
  1128. }
  1129. /*
  1130. * run func(cpu) on every cpu in /proc/stat
  1131. * return max_cpu number
  1132. */
  1133. int for_all_proc_cpus(int (func)(int))
  1134. {
  1135. FILE *fp;
  1136. int cpu_num;
  1137. int retval;
  1138. fp = fopen(proc_stat, "r");
  1139. if (fp == NULL) {
  1140. perror(proc_stat);
  1141. exit(1);
  1142. }
  1143. retval = fscanf(fp, "cpu %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n");
  1144. if (retval != 0) {
  1145. perror("/proc/stat format");
  1146. exit(1);
  1147. }
  1148. while (1) {
  1149. retval = fscanf(fp, "cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n", &cpu_num);
  1150. if (retval != 1)
  1151. break;
  1152. retval = func(cpu_num);
  1153. if (retval) {
  1154. fclose(fp);
  1155. return(retval);
  1156. }
  1157. }
  1158. fclose(fp);
  1159. return 0;
  1160. }
/*
 * Tear down and rebuild all per-cpu buffers after a topology change
 * (e.g. a cpu went offline between samples).
 */
void re_initialize(void)
{
	free_all_buffers();
	setup_all_buffers();
	printf("turbostat: re-initialized with num_cpus %d\n", topo.num_cpus);
}
  1167. /*
  1168. * count_cpus()
  1169. * remember the last one seen, it will be the max
  1170. */
  1171. int count_cpus(int cpu)
  1172. {
  1173. if (topo.max_cpu_num < cpu)
  1174. topo.max_cpu_num = cpu;
  1175. topo.num_cpus += 1;
  1176. return 0;
  1177. }
/* Record this cpu in the global present-cpu set. */
int mark_cpu_present(int cpu)
{
	CPU_SET_S(cpu, cpu_present_setsize, cpu_present_set);
	return 0;
}
/*
 * Main sampling loop: snapshot counters into alternating "even" and
 * "odd" sets, and after each interval print the delta between them.
 * A -1 from get_counters() means the topology changed; re-initialize
 * and restart.  Any worse failure aborts.
 */
void turbostat_loop()
{
	int retval;
	int restarted = 0;

restart:
	restarted++;

	retval = for_all_cpus(get_counters, EVEN_COUNTERS);
	if (retval < -1) {
		exit(retval);
	} else if (retval == -1) {
		/* give re-initialization only one chance per restart */
		if (restarted > 1) {
			exit(retval);
		}
		re_initialize();
		goto restart;
	}
	restarted = 0;
	gettimeofday(&tv_even, (struct timezone *)NULL);

	while (1) {
		/* a cpu present in our sets but gone from /proc/stat forces a restart */
		if (for_all_proc_cpus(cpu_is_not_present)) {
			re_initialize();
			goto restart;
		}
		sleep(interval_sec);
		retval = for_all_cpus(get_counters, ODD_COUNTERS);
		if (retval < -1) {
			exit(retval);
		} else if (retval == -1) {
			re_initialize();
			goto restart;
		}
		gettimeofday(&tv_odd, (struct timezone *)NULL);
		timersub(&tv_odd, &tv_even, &tv_delta);
		/* report the even->odd interval */
		for_all_cpus_2(delta_cpu, ODD_COUNTERS, EVEN_COUNTERS);
		compute_average(EVEN_COUNTERS);
		format_all_counters(EVEN_COUNTERS);
		flush_stdout();
		sleep(interval_sec);
		retval = for_all_cpus(get_counters, EVEN_COUNTERS);
		if (retval < -1) {
			exit(retval);
		} else if (retval == -1) {
			re_initialize();
			goto restart;
		}
		gettimeofday(&tv_even, (struct timezone *)NULL);
		timersub(&tv_even, &tv_odd, &tv_delta);
		/* report the odd->even interval */
		for_all_cpus_2(delta_cpu, EVEN_COUNTERS, ODD_COUNTERS);
		compute_average(ODD_COUNTERS);
		format_all_counters(ODD_COUNTERS);
		flush_stdout();
	}
}
  1236. void check_dev_msr()
  1237. {
  1238. struct stat sb;
  1239. if (stat("/dev/cpu/0/msr", &sb)) {
  1240. fprintf(stderr, "no /dev/cpu/0/msr\n");
  1241. fprintf(stderr, "Try \"# modprobe msr\"\n");
  1242. exit(-5);
  1243. }
  1244. }
  1245. void check_super_user()
  1246. {
  1247. if (getuid() != 0) {
  1248. fprintf(stderr, "must be root\n");
  1249. exit(-6);
  1250. }
  1251. }
  1252. int has_nehalem_turbo_ratio_limit(unsigned int family, unsigned int model)
  1253. {
  1254. if (!genuine_intel)
  1255. return 0;
  1256. if (family != 6)
  1257. return 0;
  1258. switch (model) {
  1259. case 0x1A: /* Core i7, Xeon 5500 series - Bloomfield, Gainstown NHM-EP */
  1260. case 0x1E: /* Core i7 and i5 Processor - Clarksfield, Lynnfield, Jasper Forest */
  1261. case 0x1F: /* Core i7 and i5 Processor - Nehalem */
  1262. case 0x25: /* Westmere Client - Clarkdale, Arrandale */
  1263. case 0x2C: /* Westmere EP - Gulftown */
  1264. case 0x2A: /* SNB */
  1265. case 0x2D: /* SNB Xeon */
  1266. case 0x3A: /* IVB */
  1267. case 0x3E: /* IVB Xeon */
  1268. case 0x3C: /* HSW */
  1269. case 0x3F: /* HSW */
  1270. case 0x45: /* HSW */
  1271. case 0x46: /* HSW */
  1272. case 0x37: /* BYT */
  1273. case 0x4D: /* AVN */
  1274. return 1;
  1275. case 0x2E: /* Nehalem-EX Xeon - Beckton */
  1276. case 0x2F: /* Westmere-EX Xeon - Eagleton */
  1277. default:
  1278. return 0;
  1279. }
  1280. }
  1281. int has_ivt_turbo_ratio_limit(unsigned int family, unsigned int model)
  1282. {
  1283. if (!genuine_intel)
  1284. return 0;
  1285. if (family != 6)
  1286. return 0;
  1287. switch (model) {
  1288. case 0x3E: /* IVB Xeon */
  1289. return 1;
  1290. default:
  1291. return 0;
  1292. }
  1293. }
  1294. /*
  1295. * print_epb()
  1296. * Decode the ENERGY_PERF_BIAS MSR
  1297. */
  1298. int print_epb(struct thread_data *t, struct core_data *c, struct pkg_data *p)
  1299. {
  1300. unsigned long long msr;
  1301. char *epb_string;
  1302. int cpu;
  1303. if (!has_epb)
  1304. return 0;
  1305. cpu = t->cpu_id;
  1306. /* EPB is per-package */
  1307. if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
  1308. return 0;
  1309. if (cpu_migrate(cpu)) {
  1310. fprintf(stderr, "Could not migrate to CPU %d\n", cpu);
  1311. return -1;
  1312. }
  1313. if (get_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS, &msr))
  1314. return 0;
  1315. switch (msr & 0x7) {
  1316. case ENERGY_PERF_BIAS_PERFORMANCE:
  1317. epb_string = "performance";
  1318. break;
  1319. case ENERGY_PERF_BIAS_NORMAL:
  1320. epb_string = "balanced";
  1321. break;
  1322. case ENERGY_PERF_BIAS_POWERSAVE:
  1323. epb_string = "powersave";
  1324. break;
  1325. default:
  1326. epb_string = "custom";
  1327. break;
  1328. }
  1329. fprintf(stderr, "cpu%d: MSR_IA32_ENERGY_PERF_BIAS: 0x%08llx (%s)\n", cpu, msr, epb_string);
  1330. return 0;
  1331. }
  1332. #define RAPL_POWER_GRANULARITY 0x7FFF /* 15 bit power granularity */
  1333. #define RAPL_TIME_GRANULARITY 0x3F /* 6 bit time granularity */
  1334. double get_tdp(model)
  1335. {
  1336. unsigned long long msr;
  1337. if (do_rapl & RAPL_PKG_POWER_INFO)
  1338. if (!get_msr(0, MSR_PKG_POWER_INFO, &msr))
  1339. return ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units;
  1340. switch (model) {
  1341. case 0x37:
  1342. case 0x4D:
  1343. return 30.0;
  1344. default:
  1345. return 135.0;
  1346. }
  1347. }
  1348. /*
  1349. * rapl_probe()
  1350. *
  1351. * sets do_rapl, rapl_power_units, rapl_energy_units, rapl_time_units
  1352. */
  1353. void rapl_probe(unsigned int family, unsigned int model)
  1354. {
  1355. unsigned long long msr;
  1356. unsigned int time_unit;
  1357. double tdp;
  1358. if (!genuine_intel)
  1359. return;
  1360. if (family != 6)
  1361. return;
  1362. switch (model) {
  1363. case 0x2A:
  1364. case 0x3A:
  1365. case 0x3C: /* HSW */
  1366. case 0x3F: /* HSW */
  1367. case 0x45: /* HSW */
  1368. case 0x46: /* HSW */
  1369. do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_GFX | RAPL_PKG_POWER_INFO;
  1370. break;
  1371. case 0x2D:
  1372. case 0x3E:
  1373. do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_PKG_PERF_STATUS | RAPL_DRAM_PERF_STATUS | RAPL_PKG_POWER_INFO;
  1374. break;
  1375. case 0x37: /* BYT */
  1376. case 0x4D: /* AVN */
  1377. do_rapl = RAPL_PKG | RAPL_CORES ;
  1378. break;
  1379. default:
  1380. return;
  1381. }
  1382. /* units on package 0, verify later other packages match */
  1383. if (get_msr(0, MSR_RAPL_POWER_UNIT, &msr))
  1384. return;
  1385. rapl_power_units = 1.0 / (1 << (msr & 0xF));
  1386. if (model == 0x37)
  1387. rapl_energy_units = 1.0 * (1 << (msr >> 8 & 0x1F)) / 1000000;
  1388. else
  1389. rapl_energy_units = 1.0 / (1 << (msr >> 8 & 0x1F));
  1390. time_unit = msr >> 16 & 0xF;
  1391. if (time_unit == 0)
  1392. time_unit = 0xA;
  1393. rapl_time_units = 1.0 / (1 << (time_unit));
  1394. tdp = get_tdp(model);
  1395. rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp;
  1396. if (verbose)
  1397. fprintf(stderr, "RAPL: %.0f sec. Joule Counter Range, at %.0f Watts\n", rapl_joule_counter_range, tdp);
  1398. return;
  1399. }
  1400. int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p)
  1401. {
  1402. unsigned long long msr;
  1403. unsigned int dts;
  1404. int cpu;
  1405. if (!(do_dts || do_ptm))
  1406. return 0;
  1407. cpu = t->cpu_id;
  1408. /* DTS is per-core, no need to print for each thread */
  1409. if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
  1410. return 0;
  1411. if (cpu_migrate(cpu)) {
  1412. fprintf(stderr, "Could not migrate to CPU %d\n", cpu);
  1413. return -1;
  1414. }
  1415. if (do_ptm && (t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) {
  1416. if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr))
  1417. return 0;
  1418. dts = (msr >> 16) & 0x7F;
  1419. fprintf(stderr, "cpu%d: MSR_IA32_PACKAGE_THERM_STATUS: 0x%08llx (%d C)\n",
  1420. cpu, msr, tcc_activation_temp - dts);
  1421. #ifdef THERM_DEBUG
  1422. if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &msr))
  1423. return 0;
  1424. dts = (msr >> 16) & 0x7F;
  1425. dts2 = (msr >> 8) & 0x7F;
  1426. fprintf(stderr, "cpu%d: MSR_IA32_PACKAGE_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n",
  1427. cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2);
  1428. #endif
  1429. }
  1430. if (do_dts) {
  1431. unsigned int resolution;
  1432. if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr))
  1433. return 0;
  1434. dts = (msr >> 16) & 0x7F;
  1435. resolution = (msr >> 27) & 0xF;
  1436. fprintf(stderr, "cpu%d: MSR_IA32_THERM_STATUS: 0x%08llx (%d C +/- %d)\n",
  1437. cpu, msr, tcc_activation_temp - dts, resolution);
  1438. #ifdef THERM_DEBUG
  1439. if (get_msr(cpu, MSR_IA32_THERM_INTERRUPT, &msr))
  1440. return 0;
  1441. dts = (msr >> 16) & 0x7F;
  1442. dts2 = (msr >> 8) & 0x7F;
  1443. fprintf(stderr, "cpu%d: MSR_IA32_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n",
  1444. cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2);
  1445. #endif
  1446. }
  1447. return 0;
  1448. }
  1449. void print_power_limit_msr(int cpu, unsigned long long msr, char *label)
  1450. {
  1451. fprintf(stderr, "cpu%d: %s: %sabled (%f Watts, %f sec, clamp %sabled)\n",
  1452. cpu, label,
  1453. ((msr >> 15) & 1) ? "EN" : "DIS",
  1454. ((msr >> 0) & 0x7FFF) * rapl_power_units,
  1455. (1.0 + (((msr >> 22) & 0x3)/4.0)) * (1 << ((msr >> 17) & 0x1F)) * rapl_time_units,
  1456. (((msr >> 16) & 1) ? "EN" : "DIS"));
  1457. return;
  1458. }
  1459. int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
  1460. {
  1461. unsigned long long msr;
  1462. int cpu;
  1463. if (!do_rapl)
  1464. return 0;
  1465. /* RAPL counters are per package, so print only for 1st thread/package */
  1466. if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
  1467. return 0;
  1468. cpu = t->cpu_id;
  1469. if (cpu_migrate(cpu)) {
  1470. fprintf(stderr, "Could not migrate to CPU %d\n", cpu);
  1471. return -1;
  1472. }
  1473. if (get_msr(cpu, MSR_RAPL_POWER_UNIT, &msr))
  1474. return -1;
  1475. if (verbose) {
  1476. fprintf(stderr, "cpu%d: MSR_RAPL_POWER_UNIT: 0x%08llx "
  1477. "(%f Watts, %f Joules, %f sec.)\n", cpu, msr,
  1478. rapl_power_units, rapl_energy_units, rapl_time_units);
  1479. }
  1480. if (do_rapl & RAPL_PKG_POWER_INFO) {
  1481. if (get_msr(cpu, MSR_PKG_POWER_INFO, &msr))
  1482. return -5;
  1483. fprintf(stderr, "cpu%d: MSR_PKG_POWER_INFO: 0x%08llx (%.0f W TDP, RAPL %.0f - %.0f W, %f sec.)\n",
  1484. cpu, msr,
  1485. ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units,
  1486. ((msr >> 16) & RAPL_POWER_GRANULARITY) * rapl_power_units,
  1487. ((msr >> 32) & RAPL_POWER_GRANULARITY) * rapl_power_units,
  1488. ((msr >> 48) & RAPL_TIME_GRANULARITY) * rapl_time_units);
  1489. }
  1490. if (do_rapl & RAPL_PKG) {
  1491. if (get_msr(cpu, MSR_PKG_POWER_LIMIT, &msr))
  1492. return -9;
  1493. fprintf(stderr, "cpu%d: MSR_PKG_POWER_LIMIT: 0x%08llx (%slocked)\n",
  1494. cpu, msr, (msr >> 63) & 1 ? "": "UN");
  1495. print_power_limit_msr(cpu, msr, "PKG Limit #1");
  1496. fprintf(stderr, "cpu%d: PKG Limit #2: %sabled (%f Watts, %f* sec, clamp %sabled)\n",
  1497. cpu,
  1498. ((msr >> 47) & 1) ? "EN" : "DIS",
  1499. ((msr >> 32) & 0x7FFF) * rapl_power_units,
  1500. (1.0 + (((msr >> 54) & 0x3)/4.0)) * (1 << ((msr >> 49) & 0x1F)) * rapl_time_units,
  1501. ((msr >> 48) & 1) ? "EN" : "DIS");
  1502. }
  1503. if (do_rapl & RAPL_DRAM) {
  1504. if (get_msr(cpu, MSR_DRAM_POWER_INFO, &msr))
  1505. return -6;
  1506. fprintf(stderr, "cpu%d: MSR_DRAM_POWER_INFO,: 0x%08llx (%.0f W TDP, RAPL %.0f - %.0f W, %f sec.)\n",
  1507. cpu, msr,
  1508. ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units,
  1509. ((msr >> 16) & RAPL_POWER_GRANULARITY) * rapl_power_units,
  1510. ((msr >> 32) & RAPL_POWER_GRANULARITY) * rapl_power_units,
  1511. ((msr >> 48) & RAPL_TIME_GRANULARITY) * rapl_time_units);
  1512. if (get_msr(cpu, MSR_DRAM_POWER_LIMIT, &msr))
  1513. return -9;
  1514. fprintf(stderr, "cpu%d: MSR_DRAM_POWER_LIMIT: 0x%08llx (%slocked)\n",
  1515. cpu, msr, (msr >> 31) & 1 ? "": "UN");
  1516. print_power_limit_msr(cpu, msr, "DRAM Limit");
  1517. }
  1518. if (do_rapl & RAPL_CORE_POLICY) {
  1519. if (verbose) {
  1520. if (get_msr(cpu, MSR_PP0_POLICY, &msr))
  1521. return -7;
  1522. fprintf(stderr, "cpu%d: MSR_PP0_POLICY: %lld\n", cpu, msr & 0xF);
  1523. }
  1524. }
  1525. if (do_rapl & RAPL_CORES) {
  1526. if (verbose) {
  1527. if (get_msr(cpu, MSR_PP0_POWER_LIMIT, &msr))
  1528. return -9;
  1529. fprintf(stderr, "cpu%d: MSR_PP0_POWER_LIMIT: 0x%08llx (%slocked)\n",
  1530. cpu, msr, (msr >> 31) & 1 ? "": "UN");
  1531. print_power_limit_msr(cpu, msr, "Cores Limit");
  1532. }
  1533. }
  1534. if (do_rapl & RAPL_GFX) {
  1535. if (verbose) {
  1536. if (get_msr(cpu, MSR_PP1_POLICY, &msr))
  1537. return -8;
  1538. fprintf(stderr, "cpu%d: MSR_PP1_POLICY: %lld\n", cpu, msr & 0xF);
  1539. if (get_msr(cpu, MSR_PP1_POWER_LIMIT, &msr))
  1540. return -9;
  1541. fprintf(stderr, "cpu%d: MSR_PP1_POWER_LIMIT: 0x%08llx (%slocked)\n",
  1542. cpu, msr, (msr >> 31) & 1 ? "": "UN");
  1543. print_power_limit_msr(cpu, msr, "GFX Limit");
  1544. }
  1545. }
  1546. return 0;
  1547. }
  1548. int is_snb(unsigned int family, unsigned int model)
  1549. {
  1550. if (!genuine_intel)
  1551. return 0;
  1552. switch (model) {
  1553. case 0x2A:
  1554. case 0x2D:
  1555. case 0x3A: /* IVB */
  1556. case 0x3E: /* IVB Xeon */
  1557. case 0x3C: /* HSW */
  1558. case 0x3F: /* HSW */
  1559. case 0x45: /* HSW */
  1560. case 0x46: /* HSW */
  1561. return 1;
  1562. }
  1563. return 0;
  1564. }
  1565. int has_c8_c9_c10(unsigned int family, unsigned int model)
  1566. {
  1567. if (!genuine_intel)
  1568. return 0;
  1569. switch (model) {
  1570. case 0x45:
  1571. return 1;
  1572. }
  1573. return 0;
  1574. }
  1575. int is_slm(unsigned int family, unsigned int model)
  1576. {
  1577. if (!genuine_intel)
  1578. return 0;
  1579. switch (model) {
  1580. case 0x37: /* BYT */
  1581. case 0x4D: /* AVN */
  1582. return 1;
  1583. }
  1584. return 0;
  1585. }
  1586. #define SLM_BCLK_FREQS 5
  1587. double slm_freq_table[SLM_BCLK_FREQS] = { 83.3, 100.0, 133.3, 116.7, 80.0};
  1588. double slm_bclk(void)
  1589. {
  1590. unsigned long long msr = 3;
  1591. unsigned int i;
  1592. double freq;
  1593. if (get_msr(0, MSR_FSB_FREQ, &msr))
  1594. fprintf(stderr, "SLM BCLK: unknown\n");
  1595. i = msr & 0xf;
  1596. if (i >= SLM_BCLK_FREQS) {
  1597. fprintf(stderr, "SLM BCLK[%d] invalid\n", i);
  1598. msr = 3;
  1599. }
  1600. freq = slm_freq_table[i];
  1601. fprintf(stderr, "SLM BCLK: %.1f Mhz\n", freq);
  1602. return freq;
  1603. }
  1604. double discover_bclk(unsigned int family, unsigned int model)
  1605. {
  1606. if (is_snb(family, model))
  1607. return 100.00;
  1608. else if (is_slm(family, model))
  1609. return slm_bclk();
  1610. else
  1611. return 133.33;
  1612. }
  1613. /*
  1614. * MSR_IA32_TEMPERATURE_TARGET indicates the temperature where
  1615. * the Thermal Control Circuit (TCC) activates.
  1616. * This is usually equal to tjMax.
  1617. *
  1618. * Older processors do not have this MSR, so there we guess,
  1619. * but also allow cmdline over-ride with -T.
  1620. *
  1621. * Several MSR temperature values are in units of degrees-C
  1622. * below this value, including the Digital Thermal Sensor (DTS),
  1623. * Package Thermal Management Sensor (PTM), and thermal event thresholds.
  1624. */
  1625. int set_temperature_target(struct thread_data *t, struct core_data *c, struct pkg_data *p)
  1626. {
  1627. unsigned long long msr;
  1628. unsigned int target_c_local;
  1629. int cpu;
  1630. /* tcc_activation_temp is used only for dts or ptm */
  1631. if (!(do_dts || do_ptm))
  1632. return 0;
  1633. /* this is a per-package concept */
  1634. if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
  1635. return 0;
  1636. cpu = t->cpu_id;
  1637. if (cpu_migrate(cpu)) {
  1638. fprintf(stderr, "Could not migrate to CPU %d\n", cpu);
  1639. return -1;
  1640. }
  1641. if (tcc_activation_temp_override != 0) {
  1642. tcc_activation_temp = tcc_activation_temp_override;
  1643. fprintf(stderr, "cpu%d: Using cmdline TCC Target (%d C)\n",
  1644. cpu, tcc_activation_temp);
  1645. return 0;
  1646. }
  1647. /* Temperature Target MSR is Nehalem and newer only */
  1648. if (!do_nehalem_platform_info)
  1649. goto guess;
  1650. if (get_msr(0, MSR_IA32_TEMPERATURE_TARGET, &msr))
  1651. goto guess;
  1652. target_c_local = (msr >> 16) & 0x7F;
  1653. if (verbose)
  1654. fprintf(stderr, "cpu%d: MSR_IA32_TEMPERATURE_TARGET: 0x%08llx (%d C)\n",
  1655. cpu, msr, target_c_local);
  1656. if (target_c_local < 85 || target_c_local > 127)
  1657. goto guess;
  1658. tcc_activation_temp = target_c_local;
  1659. return 0;
  1660. guess:
  1661. tcc_activation_temp = TJMAX_DEFAULT;
  1662. fprintf(stderr, "cpu%d: Guessing tjMax %d C, Please use -T to specify\n",
  1663. cpu, tcc_activation_temp);
  1664. return 0;
  1665. }
/*
 * check_cpuid()
 * Probe CPUID and set the global capability flags that the rest of the
 * program keys off (genuine_intel, has_aperf, do_dts, do_ptm, has_epb,
 * the per-platform do_* flags, and bclk).  Exits if a hard requirement
 * (MSR support, invariant TSC, APERF/MPERF) is missing.
 */
void check_cpuid()
{
	unsigned int eax, ebx, ecx, edx, max_level;
	unsigned int fms, family, model, stepping;

	eax = ebx = ecx = edx = 0;

	/* leaf 0: max basic leaf in EAX, vendor string in EBX:EDX:ECX */
	asm("cpuid" : "=a" (max_level), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0));

	/* "GenuineIntel" spelled as register constants ("Genu","ineI","ntel") */
	if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e)
		genuine_intel = 1;

	if (verbose)
		fprintf(stderr, "CPUID(0): %.4s%.4s%.4s ",
			(char *)&ebx, (char *)&edx, (char *)&ecx);

	/* leaf 1: family/model/stepping in EAX; feature bits in ECX/EDX */
	asm("cpuid" : "=a" (fms), "=c" (ecx), "=d" (edx) : "a" (1) : "ebx");
	family = (fms >> 8) & 0xf;
	model = (fms >> 4) & 0xf;
	stepping = fms & 0xf;
	/* extended-model bits extend the model for families 6 and 0xf */
	if (family == 6 || family == 0xf)
		model += ((fms >> 16) & 0xf) << 4;

	if (verbose)
		fprintf(stderr, "%d CPUID levels; family:model:stepping 0x%x:%x:%x (%d:%d:%d)\n",
			max_level, family, model, stepping, family, model, stepping);

	/* EDX bit 5 of leaf 1 = MSR support; nothing works without it */
	if (!(edx & (1 << 5))) {
		fprintf(stderr, "CPUID: no MSR\n");
		exit(1);
	}

	/*
	 * check max extended function levels of CPUID.
	 * This is needed to check for invariant TSC.
	 * This check is valid for both Intel and AMD.
	 */
	ebx = ecx = edx = 0;
	asm("cpuid" : "=a" (max_level), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x80000000));

	if (max_level < 0x80000007) {
		fprintf(stderr, "CPUID: no invariant TSC (max_level 0x%x)\n", max_level);
		exit(1);
	}

	/*
	 * Non-Stop TSC is advertised by CPUID.EAX=0x80000007: EDX.bit8
	 * this check is valid for both Intel and AMD
	 */
	asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x80000007));
	has_invariant_tsc = edx & (1 << 8);

	if (!has_invariant_tsc) {
		fprintf(stderr, "No invariant TSC\n");
		exit(1);
	}

	/*
	 * APERF/MPERF is advertised by CPUID.EAX=0x6: ECX.bit0
	 * this check is valid for both Intel and AMD
	 */
	asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x6));
	has_aperf = ecx & (1 << 0);	/* APERF/MPERF ratio counters */
	do_dts = eax & (1 << 0);	/* Digital Thermal Sensor */
	do_ptm = eax & (1 << 6);	/* Package Thermal Management */
	has_epb = ecx & (1 << 3);	/* Energy Performance Bias MSR */

	if (verbose)
		fprintf(stderr, "CPUID(6): %s%s%s%s\n",
			has_aperf ? "APERF" : "No APERF!",
			do_dts ? ", DTS" : "",
			do_ptm ? ", PTM": "",
			has_epb ? ", EPB": "");

	if (!has_aperf)
		exit(-1);

	/* derive per-platform feature flags from vendor + model */
	do_nehalem_platform_info = genuine_intel && has_invariant_tsc;
	do_nhm_cstates = genuine_intel;	/* all Intel w/ non-stop TSC have NHM counters */
	do_smi = do_nhm_cstates;
	do_snb_cstates = is_snb(family, model);
	do_c8_c9_c10 = has_c8_c9_c10(family, model);
	do_slm_cstates = is_slm(family, model);
	bclk = discover_bclk(family, model);

	do_nehalem_turbo_ratio_limit = has_nehalem_turbo_ratio_limit(family, model);
	do_ivt_turbo_ratio_limit = has_ivt_turbo_ratio_limit(family, model);
	rapl_probe(family, model);

	return;
}
  1740. void usage()
  1741. {
  1742. fprintf(stderr, "%s: [-v][-R][-T][-p|-P|-S][-c MSR# | -s]][-C MSR#][-m MSR#][-M MSR#][-i interval_sec | command ...]\n",
  1743. progname);
  1744. exit(1);
  1745. }
  1746. /*
  1747. * in /dev/cpu/ return success for names that are numbers
  1748. * ie. filter out ".", "..", "microcode".
  1749. */
  1750. int dir_filter(const struct dirent *dirp)
  1751. {
  1752. if (isdigit(dirp->d_name[0]))
  1753. return 1;
  1754. else
  1755. return 0;
  1756. }
  1757. int open_dev_cpu_msr(int dummy1)
  1758. {
  1759. return 0;
  1760. }
/*
 * topology_probe()
 * Walk /proc CPUs to size the topology (topo.*), allocate/initialize the
 * cpu_present and cpu_affinity sets, and decide which id columns to show.
 * Exits on allocation failure.
 */
void topology_probe()
{
	int i;
	int max_core_id = 0;
	int max_package_id = 0;
	int max_siblings = 0;
	/* scratch per-cpu topology, indexed by cpu number; freed below */
	struct cpu_topology {
		int core_id;
		int physical_package_id;
	} *cpus;

	/* Initialize num_cpus, max_cpu_num */
	topo.num_cpus = 0;
	topo.max_cpu_num = 0;
	for_all_proc_cpus(count_cpus);
	if (!summary_only && topo.num_cpus > 1)
		show_cpu = 1;

	if (verbose > 1)
		fprintf(stderr, "num_cpus %d max_cpu_num %d\n", topo.num_cpus, topo.max_cpu_num);

	cpus = calloc(1, (topo.max_cpu_num + 1) * sizeof(struct cpu_topology));
	if (cpus == NULL) {
		perror("calloc cpus");
		exit(1);
	}

	/*
	 * Allocate and initialize cpu_present_set
	 */
	cpu_present_set = CPU_ALLOC((topo.max_cpu_num + 1));
	if (cpu_present_set == NULL) {
		perror("CPU_ALLOC");
		exit(3);
	}
	cpu_present_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1));
	CPU_ZERO_S(cpu_present_setsize, cpu_present_set);
	for_all_proc_cpus(mark_cpu_present);

	/*
	 * Allocate and initialize cpu_affinity_set
	 */
	cpu_affinity_set = CPU_ALLOC((topo.max_cpu_num + 1));
	if (cpu_affinity_set == NULL) {
		perror("CPU_ALLOC");
		exit(3);
	}
	cpu_affinity_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1));
	CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set);

	/*
	 * For online cpus
	 * find max_core_id, max_package_id
	 */
	for (i = 0; i <= topo.max_cpu_num; ++i) {
		int siblings;

		if (cpu_is_not_present(i)) {
			if (verbose > 1)
				fprintf(stderr, "cpu%d NOT PRESENT\n", i);
			continue;
		}
		cpus[i].core_id = get_core_id(i);
		if (cpus[i].core_id > max_core_id)
			max_core_id = cpus[i].core_id;

		cpus[i].physical_package_id = get_physical_package_id(i);
		if (cpus[i].physical_package_id > max_package_id)
			max_package_id = cpus[i].physical_package_id;

		siblings = get_num_ht_siblings(i);
		if (siblings > max_siblings)
			max_siblings = siblings;
		if (verbose > 1)
			fprintf(stderr, "cpu %d pkg %d core %d\n",
				i, cpus[i].physical_package_id, cpus[i].core_id);
	}

	/* ids are 0-based, so counts are max id + 1 */
	topo.num_cores_per_pkg = max_core_id + 1;
	if (verbose > 1)
		fprintf(stderr, "max_core_id %d, sizing for %d cores per package\n",
			max_core_id, topo.num_cores_per_pkg);
	if (!summary_only && topo.num_cores_per_pkg > 1)
		show_core = 1;

	topo.num_packages = max_package_id + 1;
	if (verbose > 1)
		fprintf(stderr, "max_package_id %d, sizing for %d packages\n",
			max_package_id, topo.num_packages);
	if (!summary_only && topo.num_packages > 1)
		show_pkg = 1;

	topo.num_threads_per_core = max_siblings;
	if (verbose > 1)
		fprintf(stderr, "max_siblings %d\n", max_siblings);

	free(cpus);
}
  1846. void
  1847. allocate_counters(struct thread_data **t, struct core_data **c, struct pkg_data **p)
  1848. {
  1849. int i;
  1850. *t = calloc(topo.num_threads_per_core * topo.num_cores_per_pkg *
  1851. topo.num_packages, sizeof(struct thread_data));
  1852. if (*t == NULL)
  1853. goto error;
  1854. for (i = 0; i < topo.num_threads_per_core *
  1855. topo.num_cores_per_pkg * topo.num_packages; i++)
  1856. (*t)[i].cpu_id = -1;
  1857. *c = calloc(topo.num_cores_per_pkg * topo.num_packages,
  1858. sizeof(struct core_data));
  1859. if (*c == NULL)
  1860. goto error;
  1861. for (i = 0; i < topo.num_cores_per_pkg * topo.num_packages; i++)
  1862. (*c)[i].core_id = -1;
  1863. *p = calloc(topo.num_packages, sizeof(struct pkg_data));
  1864. if (*p == NULL)
  1865. goto error;
  1866. for (i = 0; i < topo.num_packages; i++)
  1867. (*p)[i].package_id = i;
  1868. return;
  1869. error:
  1870. perror("calloc counters");
  1871. exit(1);
  1872. }
  1873. /*
  1874. * init_counter()
  1875. *
  1876. * set cpu_id, core_num, pkg_num
  1877. * set FIRST_THREAD_IN_CORE and FIRST_CORE_IN_PACKAGE
  1878. *
  1879. * increment topo.num_cores when 1st core in pkg seen
  1880. */
  1881. void init_counter(struct thread_data *thread_base, struct core_data *core_base,
  1882. struct pkg_data *pkg_base, int thread_num, int core_num,
  1883. int pkg_num, int cpu_id)
  1884. {
  1885. struct thread_data *t;
  1886. struct core_data *c;
  1887. struct pkg_data *p;
  1888. t = GET_THREAD(thread_base, thread_num, core_num, pkg_num);
  1889. c = GET_CORE(core_base, core_num, pkg_num);
  1890. p = GET_PKG(pkg_base, pkg_num);
  1891. t->cpu_id = cpu_id;
  1892. if (thread_num == 0) {
  1893. t->flags |= CPU_IS_FIRST_THREAD_IN_CORE;
  1894. if (cpu_is_first_core_in_package(cpu_id))
  1895. t->flags |= CPU_IS_FIRST_CORE_IN_PACKAGE;
  1896. }
  1897. c->core_id = core_num;
  1898. p->package_id = pkg_num;
  1899. }
  1900. int initialize_counters(int cpu_id)
  1901. {
  1902. int my_thread_id, my_core_id, my_package_id;
  1903. my_package_id = get_physical_package_id(cpu_id);
  1904. my_core_id = get_core_id(cpu_id);
  1905. if (cpu_is_first_sibling_in_core(cpu_id)) {
  1906. my_thread_id = 0;
  1907. topo.num_cores++;
  1908. } else {
  1909. my_thread_id = 1;
  1910. }
  1911. init_counter(EVEN_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id);
  1912. init_counter(ODD_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id);
  1913. return 0;
  1914. }
  1915. void allocate_output_buffer()
  1916. {
  1917. output_buffer = calloc(1, (1 + topo.num_cpus) * 256);
  1918. outp = output_buffer;
  1919. if (outp == NULL) {
  1920. perror("calloc");
  1921. exit(-1);
  1922. }
  1923. }
/*
 * setup_all_buffers()
 * Discover topology, then size and populate all per-CPU state from it.
 */
void setup_all_buffers(void)
{
	topology_probe();
	/* two counter sets, alternated between measurement intervals */
	allocate_counters(&thread_even, &core_even, &package_even);
	allocate_counters(&thread_odd, &core_odd, &package_odd);
	allocate_output_buffer();
	/* bind each present CPU to its slot in both counter sets */
	for_all_proc_cpus(initialize_counters);
}
  1932. void turbostat_init()
  1933. {
  1934. check_cpuid();
  1935. check_dev_msr();
  1936. check_super_user();
  1937. setup_all_buffers();
  1938. if (verbose)
  1939. print_verbose_header();
  1940. if (verbose)
  1941. for_all_cpus(print_epb, ODD_COUNTERS);
  1942. if (verbose)
  1943. for_all_cpus(print_rapl, ODD_COUNTERS);
  1944. for_all_cpus(set_temperature_target, ODD_COUNTERS);
  1945. if (verbose)
  1946. for_all_cpus(print_thermal, ODD_COUNTERS);
  1947. }
  1948. int fork_it(char **argv)
  1949. {
  1950. pid_t child_pid;
  1951. int status;
  1952. status = for_all_cpus(get_counters, EVEN_COUNTERS);
  1953. if (status)
  1954. exit(status);
  1955. /* clear affinity side-effect of get_counters() */
  1956. sched_setaffinity(0, cpu_present_setsize, cpu_present_set);
  1957. gettimeofday(&tv_even, (struct timezone *)NULL);
  1958. child_pid = fork();
  1959. if (!child_pid) {
  1960. /* child */
  1961. execvp(argv[0], argv);
  1962. } else {
  1963. /* parent */
  1964. if (child_pid == -1) {
  1965. perror("fork");
  1966. exit(1);
  1967. }
  1968. signal(SIGINT, SIG_IGN);
  1969. signal(SIGQUIT, SIG_IGN);
  1970. if (waitpid(child_pid, &status, 0) == -1) {
  1971. perror("wait");
  1972. exit(status);
  1973. }
  1974. }
  1975. /*
  1976. * n.b. fork_it() does not check for errors from for_all_cpus()
  1977. * because re-starting is problematic when forking
  1978. */
  1979. for_all_cpus(get_counters, ODD_COUNTERS);
  1980. gettimeofday(&tv_odd, (struct timezone *)NULL);
  1981. timersub(&tv_odd, &tv_even, &tv_delta);
  1982. for_all_cpus_2(delta_cpu, ODD_COUNTERS, EVEN_COUNTERS);
  1983. compute_average(EVEN_COUNTERS);
  1984. format_all_counters(EVEN_COUNTERS);
  1985. flush_stderr();
  1986. fprintf(stderr, "%.6f sec\n", tv_delta.tv_sec + tv_delta.tv_usec/1000000.0);
  1987. return status;
  1988. }
  1989. void cmdline(int argc, char **argv)
  1990. {
  1991. int opt;
  1992. progname = argv[0];
  1993. while ((opt = getopt(argc, argv, "+pPSvi:sc:sC:m:M:RT:")) != -1) {
  1994. switch (opt) {
  1995. case 'p':
  1996. show_core_only++;
  1997. break;
  1998. case 'P':
  1999. show_pkg_only++;
  2000. break;
  2001. case 'S':
  2002. summary_only++;
  2003. break;
  2004. case 'v':
  2005. verbose++;
  2006. break;
  2007. case 'i':
  2008. interval_sec = atoi(optarg);
  2009. break;
  2010. case 'c':
  2011. sscanf(optarg, "%x", &extra_delta_offset32);
  2012. break;
  2013. case 'C':
  2014. sscanf(optarg, "%x", &extra_delta_offset64);
  2015. break;
  2016. case 'm':
  2017. sscanf(optarg, "%x", &extra_msr_offset32);
  2018. break;
  2019. case 'M':
  2020. sscanf(optarg, "%x", &extra_msr_offset64);
  2021. break;
  2022. case 'R':
  2023. rapl_verbose++;
  2024. break;
  2025. case 'T':
  2026. tcc_activation_temp_override = atoi(optarg);
  2027. break;
  2028. default:
  2029. usage();
  2030. }
  2031. }
  2032. }
  2033. int main(int argc, char **argv)
  2034. {
  2035. cmdline(argc, argv);
  2036. if (verbose)
  2037. fprintf(stderr, "turbostat v3.5 April 26, 2013"
  2038. " - Len Brown <lenb@kernel.org>\n");
  2039. turbostat_init();
  2040. /*
  2041. * if any params left, it must be a command to fork
  2042. */
  2043. if (argc - optind)
  2044. return fork_it(argv + optind);
  2045. else
  2046. turbostat_loop();
  2047. return 0;
  2048. }