kvm.h

#if !defined(_TRACE_KVM_MAIN_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVM_MAIN_H

#include <linux/tracepoint.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm

#if defined(__KVM_HAVE_IOAPIC)

TRACE_EVENT(kvm_set_irq,
        TP_PROTO(unsigned int gsi, int level, int irq_source_id),
        TP_ARGS(gsi, level, irq_source_id),

        TP_STRUCT__entry(
                __field( unsigned int, gsi )
                __field( int, level )
                __field( int, irq_source_id )
        ),

        TP_fast_assign(
                __entry->gsi = gsi;
                __entry->level = level;
                __entry->irq_source_id = irq_source_id;
        ),

        TP_printk("gsi %u level %d source %d",
                  __entry->gsi, __entry->level, __entry->irq_source_id)
);
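
/*
 * Usage sketch (added note, not part of the original header): the
 * TRACE_EVENT(kvm_set_irq, ...) definition above makes the build generate a
 * trace_kvm_set_irq() hook; the irq-routing code is expected to call it
 * roughly like the illustrative line below (the exact call site is an
 * assumption, not something this header defines):
 *
 *      trace_kvm_set_irq(gsi, level, irq_source_id);
 */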

#define kvm_deliver_mode \
        {0x0, "Fixed"}, \
        {0x1, "LowPrio"}, \
        {0x2, "SMI"}, \
        {0x3, "Res3"}, \
        {0x4, "NMI"}, \
        {0x5, "INIT"}, \
        {0x6, "SIPI"}, \
        {0x7, "ExtINT"}
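
/*
 * Descriptive note (added, not in the original): the table above feeds
 * __print_symbolic() and mirrors the 3-bit APIC delivery-mode encoding
 * (0 fixed, 1 lowest priority, 2 SMI, 4 NMI, 5 INIT, 6 SIPI/start-up,
 * 7 ExtINT). The same field layout appears in IOAPIC redirection entries
 * and MSI data words, which is why both tracepoints below reuse it.
 */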

TRACE_EVENT(kvm_ioapic_set_irq,
        TP_PROTO(__u64 e, int pin, bool coalesced),
        TP_ARGS(e, pin, coalesced),

        TP_STRUCT__entry(
                __field( __u64, e )
                __field( int, pin )
                __field( bool, coalesced )
        ),

        TP_fast_assign(
                __entry->e = e;
                __entry->pin = pin;
                __entry->coalesced = coalesced;
        ),

        TP_printk("pin %u dst %x vec=%u (%s|%s|%s%s)%s",
                  __entry->pin, (u8)(__entry->e >> 56), (u8)__entry->e,
                  __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
                  (__entry->e & (1<<11)) ? "logical" : "physical",
                  (__entry->e & (1<<15)) ? "level" : "edge",
                  (__entry->e & (1<<16)) ? "|masked" : "",
                  __entry->coalesced ? " (coalesced)" : "")
);

TRACE_EVENT(kvm_msi_set_irq,
        TP_PROTO(__u64 address, __u64 data),
        TP_ARGS(address, data),

        TP_STRUCT__entry(
                __field( __u64, address )
                __field( __u64, data )
        ),

        TP_fast_assign(
                __entry->address = address;
                __entry->data = data;
        ),

        TP_printk("dst %u vec %x (%s|%s|%s%s)",
                  (u8)(__entry->address >> 12), (u8)__entry->data,
                  __print_symbolic((__entry->data >> 8 & 0x7), kvm_deliver_mode),
                  (__entry->address & (1<<2)) ? "logical" : "physical",
                  (__entry->data & (1<<15)) ? "level" : "edge",
                  (__entry->address & (1<<3)) ? "|rh" : "")
);
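
/*
 * Decoding sketch for the MSI tracepoint above, inferred from its format
 * string (added note): destination ID from address bits 12-19, vector from
 * data bits 0-7, delivery mode from data bits 8-10, destination mode from
 * address bit 2, trigger mode from data bit 15, redirection hint from
 * address bit 3.
 */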

#define kvm_irqchips \
        {KVM_IRQCHIP_PIC_MASTER, "PIC master"}, \
        {KVM_IRQCHIP_PIC_SLAVE, "PIC slave"}, \
        {KVM_IRQCHIP_IOAPIC, "IOAPIC"}

TRACE_EVENT(kvm_ack_irq,
        TP_PROTO(unsigned int irqchip, unsigned int pin),
        TP_ARGS(irqchip, pin),

        TP_STRUCT__entry(
                __field( unsigned int, irqchip )
                __field( unsigned int, pin )
        ),

        TP_fast_assign(
                __entry->irqchip = irqchip;
                __entry->pin = pin;
        ),

        TP_printk("irqchip %s pin %u",
                  __print_symbolic(__entry->irqchip, kvm_irqchips),
                  __entry->pin)
);

#endif /* defined(__KVM_HAVE_IOAPIC) */

#define KVM_TRACE_MMIO_READ_UNSATISFIED 0
#define KVM_TRACE_MMIO_READ 1
#define KVM_TRACE_MMIO_WRITE 2

#define kvm_trace_symbol_mmio \
        { KVM_TRACE_MMIO_READ_UNSATISFIED, "unsatisfied-read" }, \
        { KVM_TRACE_MMIO_READ, "read" }, \
        { KVM_TRACE_MMIO_WRITE, "write" }

TRACE_EVENT(kvm_mmio,
        TP_PROTO(int type, int len, u64 gpa, u64 val),
        TP_ARGS(type, len, gpa, val),

        TP_STRUCT__entry(
                __field( u32, type )
                __field( u32, len )
                __field( u64, gpa )
                __field( u64, val )
        ),

        TP_fast_assign(
                __entry->type = type;
                __entry->len = len;
                __entry->gpa = gpa;
                __entry->val = val;
        ),

        TP_printk("mmio %s len %u gpa 0x%llx val 0x%llx",
                  __print_symbolic(__entry->type, kvm_trace_symbol_mmio),
                  __entry->len, __entry->gpa, __entry->val)
);
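
/*
 * Illustrative call (a sketch, added here; real call sites live in the MMIO
 * emulation paths, not in this header):
 *
 *      trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, gpa, val);
 */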

#define kvm_fpu_load_symbol \
        {0, "unload"}, \
        {1, "load"}

TRACE_EVENT(kvm_fpu,
        TP_PROTO(int load),
        TP_ARGS(load),

        TP_STRUCT__entry(
                __field( u32, load )
        ),

        TP_fast_assign(
                __entry->load = load;
        ),

        TP_printk("%s", __print_symbolic(__entry->load, kvm_fpu_load_symbol))
);
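
/*
 * Expected pairing (a sketch, added here; call sites are outside this
 * header): trace_kvm_fpu(1) when guest FPU state is loaded before entering
 * the guest, trace_kvm_fpu(0) when it is unloaded again.
 */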

TRACE_EVENT(kvm_age_page,
        TP_PROTO(ulong hva, struct kvm_memory_slot *slot, int ref),
        TP_ARGS(hva, slot, ref),

        TP_STRUCT__entry(
                __field( u64, hva )
                __field( u64, gfn )
                __field( u8, referenced )
        ),

        TP_fast_assign(
                __entry->hva = hva;
                __entry->gfn =
                        slot->base_gfn + ((hva - slot->userspace_addr) >> PAGE_SHIFT);
                __entry->referenced = ref;
        ),

        TP_printk("hva %llx gfn %llx %s",
                  __entry->hva, __entry->gfn,
                  __entry->referenced ? "YOUNG" : "OLD")
);

#ifdef CONFIG_KVM_ASYNC_PF
TRACE_EVENT(
        kvm_try_async_get_page,
        TP_PROTO(bool async, u64 pfn),
        TP_ARGS(async, pfn),

        TP_STRUCT__entry(
                __field(__u64, pfn)
        ),

        TP_fast_assign(
                __entry->pfn = (!async) ? pfn : (u64)-1;
        ),

        TP_printk("pfn %#llx", __entry->pfn)
);
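
/*
 * Note on the assignment above (added): when the page is not immediately
 * available (async == true) the recorded pfn is forced to (u64)-1, so a pfn
 * of 0xffffffffffffffff in the trace marks a fault that will be completed
 * asynchronously.
 */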

TRACE_EVENT(
        kvm_async_pf_not_present,
        TP_PROTO(u64 token, u64 gva),
        TP_ARGS(token, gva),

        TP_STRUCT__entry(
                __field(__u64, token)
                __field(__u64, gva)
        ),

        TP_fast_assign(
                __entry->token = token;
                __entry->gva = gva;
        ),

        TP_printk("token %#llx gva %#llx not present", __entry->token,
                  __entry->gva)
);

TRACE_EVENT(
        kvm_async_pf_ready,
        TP_PROTO(u64 token, u64 gva),
        TP_ARGS(token, gva),

        TP_STRUCT__entry(
                __field(__u64, token)
                __field(__u64, gva)
        ),

        TP_fast_assign(
                __entry->token = token;
                __entry->gva = gva;
        ),

        TP_printk("token %#llx gva %#llx ready", __entry->token, __entry->gva)
);

TRACE_EVENT(
        kvm_async_pf_completed,
        TP_PROTO(unsigned long address, struct page *page, u64 gva),
        TP_ARGS(address, page, gva),

        TP_STRUCT__entry(
                __field(unsigned long, address)
                __field(pfn_t, pfn)
                __field(u64, gva)
        ),

        TP_fast_assign(
                __entry->address = address;
                __entry->pfn = page ? page_to_pfn(page) : 0;
                __entry->gva = gva;
        ),

        TP_printk("gva %#llx address %#lx pfn %#llx", __entry->gva,
                  __entry->address, __entry->pfn)
);

TRACE_EVENT(
        kvm_async_pf_doublefault,
        TP_PROTO(u64 gva, u64 gfn),
        TP_ARGS(gva, gfn),

        TP_STRUCT__entry(
                __field(u64, gva)
                __field(u64, gfn)
        ),

        TP_fast_assign(
                __entry->gva = gva;
                __entry->gfn = gfn;
        ),

        TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
);
#endif

#endif /* _TRACE_KVM_MAIN_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
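
/*
 * Usage sketch (tracefs, outside this header; added note): these events can
 * typically be enabled at run time with something like
 *
 *      echo 1 > /sys/kernel/tracing/events/kvm/kvm_mmio/enable
 *      cat /sys/kernel/tracing/trace_pipe
 *
 * (older kernels expose the same files under /sys/kernel/debug/tracing).
 */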