builtin-kvm.c 23 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018
  1. #include "builtin.h"
  2. #include "perf.h"
  3. #include "util/evsel.h"
  4. #include "util/util.h"
  5. #include "util/cache.h"
  6. #include "util/symbol.h"
  7. #include "util/thread.h"
  8. #include "util/header.h"
  9. #include "util/session.h"
  10. #include "util/parse-options.h"
  11. #include "util/trace-event.h"
  12. #include "util/debug.h"
  13. #include "util/debugfs.h"
  14. #include "util/tool.h"
  15. #include "util/stat.h"
  16. #include <sys/prctl.h>
  17. #include <semaphore.h>
  18. #include <pthread.h>
  19. #include <math.h>
  20. #include "../../arch/x86/include/asm/svm.h"
  21. #include "../../arch/x86/include/asm/vmx.h"
  22. #include "../../arch/x86/include/asm/kvm.h"
/*
 * Key identifying one tracked kvm event.  Depending on the report type,
 * 'key' holds a VM-exit reason, an mmio guest physical address ("gpa")
 * or an I/O port number; 'info' holds secondary data such as the mmio
 * access type or the pio direction.
 */
struct event_key {
#define INVALID_KEY (~0ULL)
	u64 key;	/* INVALID_KEY until the key has been learnt */
	int info;
};

/* Accumulated duration plus count/mean/stddev stats for one event. */
struct kvm_event_stats {
	u64 time;		/* sum of (end - begin) sample->time deltas */
	struct stats stats;
};

/* One record per distinct event key, hashed and later rb-sorted. */
struct kvm_event {
	struct list_head hash_entry;	/* kvm_events_cache bucket linkage */
	struct rb_node rb;		/* node in the sorted result tree */
	struct event_key key;
	struct kvm_event_stats total;	/* aggregate over all vcpus */
#define DEFAULT_VCPU_NUM 8
	int max_vcpu;			/* current capacity of vcpu[] */
	struct kvm_event_stats *vcpu;	/* per-vcpu stats, grown on demand */
};

/* Sort comparator; returns non-zero when the first event is "bigger". */
typedef int (*key_cmp_fun)(struct kvm_event*, struct kvm_event*, int);

/* Maps a --key option name to its comparator. */
struct kvm_event_key {
	const char *name;
	key_cmp_fun key;
};
struct perf_kvm;

/*
 * Per report-type hooks: recognize the tracepoints that open and close a
 * timed interval, and render an event key for display.
 */
struct kvm_events_ops {
	bool (*is_begin_event)(struct perf_evsel *evsel,
			       struct perf_sample *sample,
			       struct event_key *key);
	bool (*is_end_event)(struct perf_evsel *evsel,
			     struct perf_sample *sample, struct event_key *key);
	/* format key into a fixed 20-byte buffer for the report column */
	void (*decode_key)(struct perf_kvm *kvm, struct event_key *key,
			   char decode[20]);
	const char *name;	/* column header shown in the report */
};

/* One exit-code -> symbolic-name mapping (SVM/VMX reason tables). */
struct exit_reasons_table {
	unsigned long exit_code;
	const char *reason;
};

#define EVENTS_BITS 12
#define EVENTS_CACHE_SIZE (1UL << EVENTS_BITS)	/* hash buckets; power of two */
/* All state for one 'perf kvm stat' invocation. */
struct perf_kvm {
	struct perf_tool tool;
	struct perf_session *session;

	const char *file_name;		/* perf.data path */
	const char *report_event;	/* "vmexit" | "mmio" | "ioport" */
	const char *sort_key;		/* "sample" | "time" */
	int trace_vcpu;			/* vcpu id to report; -1 == all vcpus */

	struct exit_reasons_table *exit_reasons;
	int exit_reasons_size;
	const char *exit_reasons_isa;	/* "VMX" or "SVM", for error messages */

	struct kvm_events_ops *events_ops;
	key_cmp_fun compare;		/* comparator selected via --key */
	struct list_head kvm_events_cache[EVENTS_CACHE_SIZE];
	u64 total_time;			/* grand totals for the %% columns */
	u64 total_count;
	struct rb_root result;		/* events sorted for printing */
};
  80. static void exit_event_get_key(struct perf_evsel *evsel,
  81. struct perf_sample *sample,
  82. struct event_key *key)
  83. {
  84. key->info = 0;
  85. key->key = perf_evsel__intval(evsel, sample, "exit_reason");
  86. }
  87. static bool kvm_exit_event(struct perf_evsel *evsel)
  88. {
  89. return !strcmp(evsel->name, "kvm:kvm_exit");
  90. }
  91. static bool exit_event_begin(struct perf_evsel *evsel,
  92. struct perf_sample *sample, struct event_key *key)
  93. {
  94. if (kvm_exit_event(evsel)) {
  95. exit_event_get_key(evsel, sample, key);
  96. return true;
  97. }
  98. return false;
  99. }
  100. static bool kvm_entry_event(struct perf_evsel *evsel)
  101. {
  102. return !strcmp(evsel->name, "kvm:kvm_entry");
  103. }
  104. static bool exit_event_end(struct perf_evsel *evsel,
  105. struct perf_sample *sample __maybe_unused,
  106. struct event_key *key __maybe_unused)
  107. {
  108. return kvm_entry_event(evsel);
  109. }
/* Exit-code -> name tables; contents come from the arch header macros. */
static struct exit_reasons_table vmx_exit_reasons[] = {
	VMX_EXIT_REASONS
};

static struct exit_reasons_table svm_exit_reasons[] = {
	SVM_EXIT_REASONS
};
  116. static const char *get_exit_reason(struct perf_kvm *kvm, u64 exit_code)
  117. {
  118. int i = kvm->exit_reasons_size;
  119. struct exit_reasons_table *tbl = kvm->exit_reasons;
  120. while (i--) {
  121. if (tbl->exit_code == exit_code)
  122. return tbl->reason;
  123. tbl++;
  124. }
  125. pr_err("unknown kvm exit code:%lld on %s\n",
  126. (unsigned long long)exit_code, kvm->exit_reasons_isa);
  127. return "UNKNOWN";
  128. }
  129. static void exit_event_decode_key(struct perf_kvm *kvm,
  130. struct event_key *key,
  131. char decode[20])
  132. {
  133. const char *exit_reason = get_exit_reason(kvm, key->key);
  134. scnprintf(decode, 20, "%s", exit_reason);
  135. }
/* VM-EXIT report: interval from kvm_exit to the following kvm_entry. */
static struct kvm_events_ops exit_events = {
	.is_begin_event = exit_event_begin,
	.is_end_event = exit_event_end,
	.decode_key = exit_event_decode_key,
	.name = "VM-EXIT"
};
  142. /*
  143. * For the mmio events, we treat:
  144. * the time of MMIO write: kvm_mmio(KVM_TRACE_MMIO_WRITE...) -> kvm_entry
  145. * the time of MMIO read: kvm_exit -> kvm_mmio(KVM_TRACE_MMIO_READ...).
  146. */
  147. static void mmio_event_get_key(struct perf_evsel *evsel, struct perf_sample *sample,
  148. struct event_key *key)
  149. {
  150. key->key = perf_evsel__intval(evsel, sample, "gpa");
  151. key->info = perf_evsel__intval(evsel, sample, "type");
  152. }
  153. #define KVM_TRACE_MMIO_READ_UNSATISFIED 0
  154. #define KVM_TRACE_MMIO_READ 1
  155. #define KVM_TRACE_MMIO_WRITE 2
  156. static bool mmio_event_begin(struct perf_evsel *evsel,
  157. struct perf_sample *sample, struct event_key *key)
  158. {
  159. /* MMIO read begin event in kernel. */
  160. if (kvm_exit_event(evsel))
  161. return true;
  162. /* MMIO write begin event in kernel. */
  163. if (!strcmp(evsel->name, "kvm:kvm_mmio") &&
  164. perf_evsel__intval(evsel, sample, "type") == KVM_TRACE_MMIO_WRITE) {
  165. mmio_event_get_key(evsel, sample, key);
  166. return true;
  167. }
  168. return false;
  169. }
  170. static bool mmio_event_end(struct perf_evsel *evsel, struct perf_sample *sample,
  171. struct event_key *key)
  172. {
  173. /* MMIO write end event in kernel. */
  174. if (kvm_entry_event(evsel))
  175. return true;
  176. /* MMIO read end event in kernel.*/
  177. if (!strcmp(evsel->name, "kvm:kvm_mmio") &&
  178. perf_evsel__intval(evsel, sample, "type") == KVM_TRACE_MMIO_READ) {
  179. mmio_event_get_key(evsel, sample, key);
  180. return true;
  181. }
  182. return false;
  183. }
  184. static void mmio_event_decode_key(struct perf_kvm *kvm __maybe_unused,
  185. struct event_key *key,
  186. char decode[20])
  187. {
  188. scnprintf(decode, 20, "%#lx:%s", (unsigned long)key->key,
  189. key->info == KVM_TRACE_MMIO_WRITE ? "W" : "R");
  190. }
/* MMIO report: see the interval definitions in the comment above. */
static struct kvm_events_ops mmio_events = {
	.is_begin_event = mmio_event_begin,
	.is_end_event = mmio_event_end,
	.decode_key = mmio_event_decode_key,
	.name = "MMIO Access"
};

/* The time of emulation pio access is from kvm_pio to kvm_entry. */
  198. static void ioport_event_get_key(struct perf_evsel *evsel,
  199. struct perf_sample *sample,
  200. struct event_key *key)
  201. {
  202. key->key = perf_evsel__intval(evsel, sample, "port");
  203. key->info = perf_evsel__intval(evsel, sample, "rw");
  204. }
  205. static bool ioport_event_begin(struct perf_evsel *evsel,
  206. struct perf_sample *sample,
  207. struct event_key *key)
  208. {
  209. if (!strcmp(evsel->name, "kvm:kvm_pio")) {
  210. ioport_event_get_key(evsel, sample, key);
  211. return true;
  212. }
  213. return false;
  214. }
  215. static bool ioport_event_end(struct perf_evsel *evsel,
  216. struct perf_sample *sample __maybe_unused,
  217. struct event_key *key __maybe_unused)
  218. {
  219. return kvm_entry_event(evsel);
  220. }
  221. static void ioport_event_decode_key(struct perf_kvm *kvm __maybe_unused,
  222. struct event_key *key,
  223. char decode[20])
  224. {
  225. scnprintf(decode, 20, "%#llx:%s", (unsigned long long)key->key,
  226. key->info ? "POUT" : "PIN");
  227. }
/* PIO report: interval from kvm_pio to the following kvm_entry. */
static struct kvm_events_ops ioport_events = {
	.is_begin_event = ioport_event_begin,
	.is_end_event = ioport_event_end,
	.decode_key = ioport_event_decode_key,
	.name = "IO Port Access"
};
  234. static bool register_kvm_events_ops(struct perf_kvm *kvm)
  235. {
  236. bool ret = true;
  237. if (!strcmp(kvm->report_event, "vmexit"))
  238. kvm->events_ops = &exit_events;
  239. else if (!strcmp(kvm->report_event, "mmio"))
  240. kvm->events_ops = &mmio_events;
  241. else if (!strcmp(kvm->report_event, "ioport"))
  242. kvm->events_ops = &ioport_events;
  243. else {
  244. pr_err("Unknown report event:%s\n", kvm->report_event);
  245. ret = false;
  246. }
  247. return ret;
  248. }
/* Per-thread state: the interval currently open on one vcpu. */
struct vcpu_event_record {
	int vcpu_id;			/* read from the kvm_entry tracepoint */
	u64 start_time;			/* 0 when no interval is open */
	struct kvm_event *last_event;	/* begun event; NULL if key unknown yet */
};
  254. static void init_kvm_event_record(struct perf_kvm *kvm)
  255. {
  256. int i;
  257. for (i = 0; i < (int)EVENTS_CACHE_SIZE; i++)
  258. INIT_LIST_HEAD(&kvm->kvm_events_cache[i]);
  259. }
  260. static int kvm_events_hash_fn(u64 key)
  261. {
  262. return key & (EVENTS_CACHE_SIZE - 1);
  263. }
  264. static bool kvm_event_expand(struct kvm_event *event, int vcpu_id)
  265. {
  266. int old_max_vcpu = event->max_vcpu;
  267. if (vcpu_id < event->max_vcpu)
  268. return true;
  269. while (event->max_vcpu <= vcpu_id)
  270. event->max_vcpu += DEFAULT_VCPU_NUM;
  271. event->vcpu = realloc(event->vcpu,
  272. event->max_vcpu * sizeof(*event->vcpu));
  273. if (!event->vcpu) {
  274. pr_err("Not enough memory\n");
  275. return false;
  276. }
  277. memset(event->vcpu + old_max_vcpu, 0,
  278. (event->max_vcpu - old_max_vcpu) * sizeof(*event->vcpu));
  279. return true;
  280. }
  281. static struct kvm_event *kvm_alloc_init_event(struct event_key *key)
  282. {
  283. struct kvm_event *event;
  284. event = zalloc(sizeof(*event));
  285. if (!event) {
  286. pr_err("Not enough memory\n");
  287. return NULL;
  288. }
  289. event->key = *key;
  290. return event;
  291. }
  292. static struct kvm_event *find_create_kvm_event(struct perf_kvm *kvm,
  293. struct event_key *key)
  294. {
  295. struct kvm_event *event;
  296. struct list_head *head;
  297. BUG_ON(key->key == INVALID_KEY);
  298. head = &kvm->kvm_events_cache[kvm_events_hash_fn(key->key)];
  299. list_for_each_entry(event, head, hash_entry)
  300. if (event->key.key == key->key && event->key.info == key->info)
  301. return event;
  302. event = kvm_alloc_init_event(key);
  303. if (!event)
  304. return NULL;
  305. list_add(&event->hash_entry, head);
  306. return event;
  307. }
  308. static bool handle_begin_event(struct perf_kvm *kvm,
  309. struct vcpu_event_record *vcpu_record,
  310. struct event_key *key, u64 timestamp)
  311. {
  312. struct kvm_event *event = NULL;
  313. if (key->key != INVALID_KEY)
  314. event = find_create_kvm_event(kvm, key);
  315. vcpu_record->last_event = event;
  316. vcpu_record->start_time = timestamp;
  317. return true;
  318. }
  319. static void
  320. kvm_update_event_stats(struct kvm_event_stats *kvm_stats, u64 time_diff)
  321. {
  322. kvm_stats->time += time_diff;
  323. update_stats(&kvm_stats->stats, time_diff);
  324. }
  325. static double kvm_event_rel_stddev(int vcpu_id, struct kvm_event *event)
  326. {
  327. struct kvm_event_stats *kvm_stats = &event->total;
  328. if (vcpu_id != -1)
  329. kvm_stats = &event->vcpu[vcpu_id];
  330. return rel_stddev_stats(stddev_stats(&kvm_stats->stats),
  331. avg_stats(&kvm_stats->stats));
  332. }
  333. static bool update_kvm_event(struct kvm_event *event, int vcpu_id,
  334. u64 time_diff)
  335. {
  336. kvm_update_event_stats(&event->total, time_diff);
  337. if (!kvm_event_expand(event, vcpu_id))
  338. return false;
  339. kvm_update_event_stats(&event->vcpu[vcpu_id], time_diff);
  340. return true;
  341. }
  342. static bool handle_end_event(struct perf_kvm *kvm,
  343. struct vcpu_event_record *vcpu_record,
  344. struct event_key *key,
  345. u64 timestamp)
  346. {
  347. struct kvm_event *event;
  348. u64 time_begin, time_diff;
  349. event = vcpu_record->last_event;
  350. time_begin = vcpu_record->start_time;
  351. /* The begin event is not caught. */
  352. if (!time_begin)
  353. return true;
  354. /*
  355. * In some case, the 'begin event' only records the start timestamp,
  356. * the actual event is recognized in the 'end event' (e.g. mmio-event).
  357. */
  358. /* Both begin and end events did not get the key. */
  359. if (!event && key->key == INVALID_KEY)
  360. return true;
  361. if (!event)
  362. event = find_create_kvm_event(kvm, key);
  363. if (!event)
  364. return false;
  365. vcpu_record->last_event = NULL;
  366. vcpu_record->start_time = 0;
  367. BUG_ON(timestamp < time_begin);
  368. time_diff = timestamp - time_begin;
  369. return update_kvm_event(event, vcpu_record->vcpu_id, time_diff);
  370. }
  371. static
  372. struct vcpu_event_record *per_vcpu_record(struct thread *thread,
  373. struct perf_evsel *evsel,
  374. struct perf_sample *sample)
  375. {
  376. /* Only kvm_entry records vcpu id. */
  377. if (!thread->priv && kvm_entry_event(evsel)) {
  378. struct vcpu_event_record *vcpu_record;
  379. vcpu_record = zalloc(sizeof(*vcpu_record));
  380. if (!vcpu_record) {
  381. pr_err("%s: Not enough memory\n", __func__);
  382. return NULL;
  383. }
  384. vcpu_record->vcpu_id = perf_evsel__intval(evsel, sample, "vcpu_id");
  385. thread->priv = vcpu_record;
  386. }
  387. return thread->priv;
  388. }
  389. static bool handle_kvm_event(struct perf_kvm *kvm,
  390. struct thread *thread,
  391. struct perf_evsel *evsel,
  392. struct perf_sample *sample)
  393. {
  394. struct vcpu_event_record *vcpu_record;
  395. struct event_key key = {.key = INVALID_KEY};
  396. vcpu_record = per_vcpu_record(thread, evsel, sample);
  397. if (!vcpu_record)
  398. return true;
  399. if (kvm->events_ops->is_begin_event(evsel, sample, &key))
  400. return handle_begin_event(kvm, vcpu_record, &key, sample->time);
  401. if (kvm->events_ops->is_end_event(evsel, sample, &key))
  402. return handle_end_event(kvm, vcpu_record, &key, sample->time);
  403. return true;
  404. }
/*
 * GET_EVENT_KEY(func, field) generates get_event_<func>(), which reads
 * either the aggregate statistic (vcpu == -1) or the per-vcpu one
 * (0 when the vcpu slot was never allocated).  COMPARE_EVENT_KEY also
 * generates the matching comparator used for sorting.
 */
#define GET_EVENT_KEY(func, field) \
static u64 get_event_ ##func(struct kvm_event *event, int vcpu) \
{ \
	if (vcpu == -1) \
		return event->total.field; \
 \
	if (vcpu >= event->max_vcpu) \
		return 0; \
 \
	return event->vcpu[vcpu].field; \
}

#define COMPARE_EVENT_KEY(func, field) \
GET_EVENT_KEY(func, field) \
static int compare_kvm_event_ ## func(struct kvm_event *one, \
				      struct kvm_event *two, int vcpu)\
{ \
	return get_event_ ##func(one, vcpu) > \
	       get_event_ ##func(two, vcpu); \
}

GET_EVENT_KEY(time, time);
COMPARE_EVENT_KEY(count, stats.n);
COMPARE_EVENT_KEY(mean, stats.mean);

#define DEF_SORT_NAME_KEY(name, compare_key) \
	{ #name, compare_kvm_event_ ## compare_key }

/* Sort keys selectable with --key; NULL-terminated. */
static struct kvm_event_key keys[] = {
	DEF_SORT_NAME_KEY(sample, count),	/* "sample": sort by hit count */
	DEF_SORT_NAME_KEY(time, mean),		/* "time": sort by mean duration */
	{ NULL, NULL }
};
  434. static bool select_key(struct perf_kvm *kvm)
  435. {
  436. int i;
  437. for (i = 0; keys[i].name; i++) {
  438. if (!strcmp(keys[i].name, kvm->sort_key)) {
  439. kvm->compare = keys[i].key;
  440. return true;
  441. }
  442. }
  443. pr_err("Unknown compare key:%s\n", kvm->sort_key);
  444. return false;
  445. }
  446. static void insert_to_result(struct rb_root *result, struct kvm_event *event,
  447. key_cmp_fun bigger, int vcpu)
  448. {
  449. struct rb_node **rb = &result->rb_node;
  450. struct rb_node *parent = NULL;
  451. struct kvm_event *p;
  452. while (*rb) {
  453. p = container_of(*rb, struct kvm_event, rb);
  454. parent = *rb;
  455. if (bigger(event, p, vcpu))
  456. rb = &(*rb)->rb_left;
  457. else
  458. rb = &(*rb)->rb_right;
  459. }
  460. rb_link_node(&event->rb, parent, rb);
  461. rb_insert_color(&event->rb, result);
  462. }
  463. static void update_total_count(struct perf_kvm *kvm, struct kvm_event *event)
  464. {
  465. int vcpu = kvm->trace_vcpu;
  466. kvm->total_count += get_event_count(event, vcpu);
  467. kvm->total_time += get_event_time(event, vcpu);
  468. }
  469. static bool event_is_valid(struct kvm_event *event, int vcpu)
  470. {
  471. return !!get_event_count(event, vcpu);
  472. }
  473. static void sort_result(struct perf_kvm *kvm)
  474. {
  475. unsigned int i;
  476. int vcpu = kvm->trace_vcpu;
  477. struct kvm_event *event;
  478. for (i = 0; i < EVENTS_CACHE_SIZE; i++)
  479. list_for_each_entry(event, &kvm->kvm_events_cache[i], hash_entry)
  480. if (event_is_valid(event, vcpu)) {
  481. update_total_count(kvm, event);
  482. insert_to_result(&kvm->result, event,
  483. kvm->compare, vcpu);
  484. }
  485. }
  486. /* returns left most element of result, and erase it */
  487. static struct kvm_event *pop_from_result(struct rb_root *result)
  488. {
  489. struct rb_node *node = rb_first(result);
  490. if (!node)
  491. return NULL;
  492. rb_erase(node, result);
  493. return container_of(node, struct kvm_event, rb);
  494. }
/* Print the "Analyze events for ..." banner; -1 means all vcpus. */
static void print_vcpu_info(int vcpu)
{
	pr_info("Analyze events for ");

	if (vcpu == -1)
		pr_info("all VCPUs:\n\n");
	else
		pr_info("VCPU %d:\n\n", vcpu);
}
/* Print the sorted report table; consumes (empties) kvm->result. */
static void print_result(struct perf_kvm *kvm)
{
	char decode[20];
	struct kvm_event *event;
	int vcpu = kvm->trace_vcpu;

	pr_info("\n\n");
	print_vcpu_info(vcpu);
	pr_info("%20s ", kvm->events_ops->name);
	pr_info("%10s ", "Samples");
	pr_info("%9s ", "Samples%");
	pr_info("%9s ", "Time%");
	pr_info("%16s ", "Avg time");
	pr_info("\n\n");

	while ((event = pop_from_result(&kvm->result))) {
		u64 ecount, etime;

		ecount = get_event_count(event, vcpu);
		etime = get_event_time(event, vcpu);

		kvm->events_ops->decode_key(kvm, &event->key, decode);
		pr_info("%20s ", decode);
		pr_info("%10llu ", (unsigned long long)ecount);
		pr_info("%8.2f%% ", (double)ecount / kvm->total_count * 100);
		pr_info("%8.2f%% ", (double)etime / kvm->total_time * 100);
		/* ecount != 0 here: sort_result() only inserts valid events */
		pr_info("%9.2fus ( +-%7.2f%% )", (double)etime / ecount/1e3,
			kvm_event_rel_stddev(vcpu, event));
		pr_info("\n");
	}

	pr_info("\nTotal Samples:%lld, Total events handled time:%.2fus.\n\n",
		(unsigned long long)kvm->total_count, kvm->total_time / 1e3);
}
  532. static int process_sample_event(struct perf_tool *tool,
  533. union perf_event *event,
  534. struct perf_sample *sample,
  535. struct perf_evsel *evsel,
  536. struct machine *machine)
  537. {
  538. struct thread *thread = machine__findnew_thread(machine, sample->tid);
  539. struct perf_kvm *kvm = container_of(tool, struct perf_kvm, tool);
  540. if (thread == NULL) {
  541. pr_debug("problem processing %d event, skipping it.\n",
  542. event->header.type);
  543. return -1;
  544. }
  545. if (!handle_kvm_event(kvm, thread, evsel, sample))
  546. return -1;
  547. return 0;
  548. }
  549. static int get_cpu_isa(struct perf_session *session)
  550. {
  551. char *cpuid = session->header.env.cpuid;
  552. int isa;
  553. if (strstr(cpuid, "Intel"))
  554. isa = 1;
  555. else if (strstr(cpuid, "AMD"))
  556. isa = 0;
  557. else {
  558. pr_err("CPU %s is not supported.\n", cpuid);
  559. isa = -ENOTSUP;
  560. }
  561. return isa;
  562. }
/*
 * Open the input file, pick the exit-reason table matching the recorded
 * CPU, and feed every sample through process_sample_event().
 * Returns 0 on success, a negative error otherwise.
 */
static int read_events(struct perf_kvm *kvm)
{
	int ret;

	struct perf_tool eops = {
		.sample = process_sample_event,
		.comm = perf_event__process_comm,
		.ordered_samples = true,	/* replay samples in time order */
	};

	kvm->tool = eops;
	kvm->session = perf_session__new(kvm->file_name, O_RDONLY, 0, false,
					 &kvm->tool);
	if (!kvm->session) {
		pr_err("Initializing perf session failed\n");
		return -EINVAL;
	}

	/* NOTE(review): the session is not released on the error paths
	 * below; tolerable in this short-lived tool, but worth confirming. */
	if (!perf_session__has_traces(kvm->session, "kvm record"))
		return -EINVAL;

	/*
	 * Do not use 'isa' recorded in kvm_exit tracepoint since it is not
	 * traced in the old kernel.
	 */
	ret = get_cpu_isa(kvm->session);
	if (ret < 0)
		return ret;

	/* SVM tables are the default (set in cmd_kvm); switch to VMX on Intel. */
	if (ret == 1) {
		kvm->exit_reasons = vmx_exit_reasons;
		kvm->exit_reasons_size = ARRAY_SIZE(vmx_exit_reasons);
		kvm->exit_reasons_isa = "VMX";
	}

	return perf_session__process_events(kvm->session, &kvm->tool);
}
  594. static bool verify_vcpu(int vcpu)
  595. {
  596. if (vcpu != -1 && vcpu < 0) {
  597. pr_err("Invalid vcpu:%d.\n", vcpu);
  598. return false;
  599. }
  600. return true;
  601. }
  602. static int kvm_events_report_vcpu(struct perf_kvm *kvm)
  603. {
  604. int ret = -EINVAL;
  605. int vcpu = kvm->trace_vcpu;
  606. if (!verify_vcpu(vcpu))
  607. goto exit;
  608. if (!select_key(kvm))
  609. goto exit;
  610. if (!register_kvm_events_ops(kvm))
  611. goto exit;
  612. init_kvm_event_record(kvm);
  613. setup_pager();
  614. ret = read_events(kvm);
  615. if (ret)
  616. goto exit;
  617. sort_result(kvm);
  618. print_result(kvm);
  619. exit:
  620. return ret;
  621. }
/* Fixed argument list handed to 'perf record' by 'kvm stat record'. */
static const char * const record_args[] = {
	"record",
	"-R",
	"-f",
	"-m", "1024",
	"-c", "1",
	"-e", "kvm:kvm_entry",
	"-e", "kvm:kvm_exit",
	"-e", "kvm:kvm_mmio",
	"-e", "kvm:kvm_pio",
};

/*
 * strdup() or return -ENOMEM from the *enclosing* function (statement
 * expression with an embedded return).
 * NOTE(review): on failure this returns without freeing the strings
 * already placed in rec_argv — a one-shot leak on the OOM path.
 */
#define STRDUP_FAIL_EXIT(s)		\
	({	char *_p;		\
		_p = strdup(s);		\
		if (!_p)		\
			return -ENOMEM;	\
		_p;			\
	})
  640. static int kvm_events_record(struct perf_kvm *kvm, int argc, const char **argv)
  641. {
  642. unsigned int rec_argc, i, j;
  643. const char **rec_argv;
  644. rec_argc = ARRAY_SIZE(record_args) + argc + 2;
  645. rec_argv = calloc(rec_argc + 1, sizeof(char *));
  646. if (rec_argv == NULL)
  647. return -ENOMEM;
  648. for (i = 0; i < ARRAY_SIZE(record_args); i++)
  649. rec_argv[i] = STRDUP_FAIL_EXIT(record_args[i]);
  650. rec_argv[i++] = STRDUP_FAIL_EXIT("-o");
  651. rec_argv[i++] = STRDUP_FAIL_EXIT(kvm->file_name);
  652. for (j = 1; j < (unsigned int)argc; j++, i++)
  653. rec_argv[i] = argv[j];
  654. return cmd_record(i, rec_argv, NULL);
  655. }
/* 'perf kvm stat report' entry: parse report options, then run the report. */
static int kvm_events_report(struct perf_kvm *kvm, int argc, const char **argv)
{
	const struct option kvm_events_report_options[] = {
		OPT_STRING(0, "event", &kvm->report_event, "report event",
			   "event for reporting: vmexit, mmio, ioport"),
		OPT_INTEGER(0, "vcpu", &kvm->trace_vcpu,
			    "vcpu id to report"),
		OPT_STRING('k', "key", &kvm->sort_key, "sort-key",
			   "key for sorting: sample(sort by samples number)"
			   " time (sort by avg time)"),
		OPT_END()
	};

	const char * const kvm_events_report_usage[] = {
		"perf kvm stat report [<options>]",
		NULL
	};

	symbol__init();

	if (argc) {
		argc = parse_options(argc, argv,
				     kvm_events_report_options,
				     kvm_events_report_usage, 0);
		/* leftover positional arguments are not accepted */
		if (argc)
			usage_with_options(kvm_events_report_usage,
					   kvm_events_report_options);
	}

	return kvm_events_report_vcpu(kvm);
}
/* Help text shown when 'perf kvm stat' runs without a subcommand. */
static void print_kvm_stat_usage(void)
{
	printf("Usage: perf kvm stat <command>\n\n");

	printf("# Available commands:\n");
	printf("\trecord: record kvm events\n");
	printf("\treport: report statistical data of kvm events\n");

	printf("\nOtherwise, it is the alias of 'perf stat':\n");
}
  691. static int kvm_cmd_stat(struct perf_kvm *kvm, int argc, const char **argv)
  692. {
  693. if (argc == 1) {
  694. print_kvm_stat_usage();
  695. goto perf_stat;
  696. }
  697. if (!strncmp(argv[1], "rec", 3))
  698. return kvm_events_record(kvm, argc - 1, argv + 1);
  699. if (!strncmp(argv[1], "rep", 3))
  700. return kvm_events_report(kvm, argc - 1 , argv + 1);
  701. perf_stat:
  702. return cmd_stat(argc, argv, NULL);
  703. }
  704. static int __cmd_record(struct perf_kvm *kvm, int argc, const char **argv)
  705. {
  706. int rec_argc, i = 0, j;
  707. const char **rec_argv;
  708. rec_argc = argc + 2;
  709. rec_argv = calloc(rec_argc + 1, sizeof(char *));
  710. rec_argv[i++] = strdup("record");
  711. rec_argv[i++] = strdup("-o");
  712. rec_argv[i++] = strdup(kvm->file_name);
  713. for (j = 1; j < argc; j++, i++)
  714. rec_argv[i] = argv[j];
  715. BUG_ON(i != rec_argc);
  716. return cmd_record(i, rec_argv, NULL);
  717. }
  718. static int __cmd_report(struct perf_kvm *kvm, int argc, const char **argv)
  719. {
  720. int rec_argc, i = 0, j;
  721. const char **rec_argv;
  722. rec_argc = argc + 2;
  723. rec_argv = calloc(rec_argc + 1, sizeof(char *));
  724. rec_argv[i++] = strdup("report");
  725. rec_argv[i++] = strdup("-i");
  726. rec_argv[i++] = strdup(kvm->file_name);
  727. for (j = 1; j < argc; j++, i++)
  728. rec_argv[i] = argv[j];
  729. BUG_ON(i != rec_argc);
  730. return cmd_report(i, rec_argv, NULL);
  731. }
  732. static int __cmd_buildid_list(struct perf_kvm *kvm, int argc, const char **argv)
  733. {
  734. int rec_argc, i = 0, j;
  735. const char **rec_argv;
  736. rec_argc = argc + 2;
  737. rec_argv = calloc(rec_argc + 1, sizeof(char *));
  738. rec_argv[i++] = strdup("buildid-list");
  739. rec_argv[i++] = strdup("-i");
  740. rec_argv[i++] = strdup(kvm->file_name);
  741. for (j = 1; j < argc; j++, i++)
  742. rec_argv[i] = argv[j];
  743. BUG_ON(i != rec_argc);
  744. return cmd_buildid_list(i, rec_argv, NULL);
  745. }
/*
 * Entry point for 'perf kvm': parse the common options, pick a default
 * perf.data file name, then dispatch to the requested subcommand.
 */
int cmd_kvm(int argc, const char **argv, const char *prefix __maybe_unused)
{
	struct perf_kvm kvm = {
		.trace_vcpu = -1,		/* all vcpus by default */
		.report_event = "vmexit",
		.sort_key = "sample",
		/* SVM is the default; read_events() switches to VMX on Intel */
		.exit_reasons = svm_exit_reasons,
		.exit_reasons_size = ARRAY_SIZE(svm_exit_reasons),
		.exit_reasons_isa = "SVM",
	};

	const struct option kvm_options[] = {
		/* -i and -o intentionally share kvm.file_name */
		OPT_STRING('i', "input", &kvm.file_name, "file",
			   "Input file name"),
		OPT_STRING('o', "output", &kvm.file_name, "file",
			   "Output file name"),
		OPT_BOOLEAN(0, "guest", &perf_guest,
			    "Collect guest os data"),
		OPT_BOOLEAN(0, "host", &perf_host,
			    "Collect host os data"),
		OPT_STRING(0, "guestmount", &symbol_conf.guestmount, "directory",
			   "guest mount directory under which every guest os"
			   " instance has a subdir"),
		OPT_STRING(0, "guestvmlinux", &symbol_conf.default_guest_vmlinux_name,
			   "file", "file saving guest os vmlinux"),
		OPT_STRING(0, "guestkallsyms", &symbol_conf.default_guest_kallsyms,
			   "file", "file saving guest os /proc/kallsyms"),
		OPT_STRING(0, "guestmodules", &symbol_conf.default_guest_modules,
			   "file", "file saving guest os /proc/modules"),
		OPT_END()
	};

	const char * const kvm_usage[] = {
		"perf kvm [<options>] {top|record|report|diff|buildid-list|stat}",
		NULL
	};

	/* guest-only collection is the default for perf kvm */
	perf_host = 0;
	perf_guest = 1;

	argc = parse_options(argc, argv, kvm_options, kvm_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc)
		usage_with_options(kvm_usage, kvm_options);

	if (!perf_host)
		perf_guest = 1;

	/* pick a default data file name reflecting what is being collected */
	if (!kvm.file_name) {
		if (perf_host && !perf_guest)
			kvm.file_name = strdup("perf.data.host");
		else if (!perf_host && perf_guest)
			kvm.file_name = strdup("perf.data.guest");
		else
			kvm.file_name = strdup("perf.data.kvm");

		if (!kvm.file_name) {
			pr_err("Failed to allocate memory for filename\n");
			return -ENOMEM;
		}
	}

	/* subcommands match on a short unambiguous prefix */
	if (!strncmp(argv[0], "rec", 3))
		return __cmd_record(&kvm, argc, argv);
	else if (!strncmp(argv[0], "rep", 3))
		return __cmd_report(&kvm, argc, argv);
	else if (!strncmp(argv[0], "diff", 4))
		return cmd_diff(argc, argv, NULL);
	else if (!strncmp(argv[0], "top", 3))
		return cmd_top(argc, argv, NULL);
	else if (!strncmp(argv[0], "buildid-list", 12))
		return __cmd_buildid_list(&kvm, argc, argv);
	else if (!strncmp(argv[0], "stat", 4))
		return kvm_cmd_stat(&kvm, argc, argv);
	else
		usage_with_options(kvm_usage, kvm_options);

	return 0;
}