/*
 * trace_pr.h - tracepoint definitions for KVM Book3S PR (problem-state) mode.
 * (Original file banner/metadata lost in extraction.)
 */
  1. #if !defined(_TRACE_KVM_PR_H) || defined(TRACE_HEADER_MULTI_READ)
  2. #define _TRACE_KVM_PR_H
  3. #include <linux/tracepoint.h>
  4. #undef TRACE_SYSTEM
  5. #define TRACE_SYSTEM kvm_pr
  6. #define TRACE_INCLUDE_PATH .
  7. #define TRACE_INCLUDE_FILE trace_pr
  8. #define kvm_trace_symbol_exit \
  9. {0x100, "SYSTEM_RESET"}, \
  10. {0x200, "MACHINE_CHECK"}, \
  11. {0x300, "DATA_STORAGE"}, \
  12. {0x380, "DATA_SEGMENT"}, \
  13. {0x400, "INST_STORAGE"}, \
  14. {0x480, "INST_SEGMENT"}, \
  15. {0x500, "EXTERNAL"}, \
  16. {0x501, "EXTERNAL_LEVEL"}, \
  17. {0x502, "EXTERNAL_HV"}, \
  18. {0x600, "ALIGNMENT"}, \
  19. {0x700, "PROGRAM"}, \
  20. {0x800, "FP_UNAVAIL"}, \
  21. {0x900, "DECREMENTER"}, \
  22. {0x980, "HV_DECREMENTER"}, \
  23. {0xc00, "SYSCALL"}, \
  24. {0xd00, "TRACE"}, \
  25. {0xe00, "H_DATA_STORAGE"}, \
  26. {0xe20, "H_INST_STORAGE"}, \
  27. {0xe40, "H_EMUL_ASSIST"}, \
  28. {0xf00, "PERFMON"}, \
  29. {0xf20, "ALTIVEC"}, \
  30. {0xf40, "VSX"}
  31. TRACE_EVENT(kvm_book3s_reenter,
  32. TP_PROTO(int r, struct kvm_vcpu *vcpu),
  33. TP_ARGS(r, vcpu),
  34. TP_STRUCT__entry(
  35. __field( unsigned int, r )
  36. __field( unsigned long, pc )
  37. ),
  38. TP_fast_assign(
  39. __entry->r = r;
  40. __entry->pc = kvmppc_get_pc(vcpu);
  41. ),
  42. TP_printk("reentry r=%d | pc=0x%lx", __entry->r, __entry->pc)
  43. );
  44. #ifdef CONFIG_PPC_BOOK3S_64
  45. TRACE_EVENT(kvm_book3s_64_mmu_map,
  46. TP_PROTO(int rflags, ulong hpteg, ulong va, pfn_t hpaddr,
  47. struct kvmppc_pte *orig_pte),
  48. TP_ARGS(rflags, hpteg, va, hpaddr, orig_pte),
  49. TP_STRUCT__entry(
  50. __field( unsigned char, flag_w )
  51. __field( unsigned char, flag_x )
  52. __field( unsigned long, eaddr )
  53. __field( unsigned long, hpteg )
  54. __field( unsigned long, va )
  55. __field( unsigned long long, vpage )
  56. __field( unsigned long, hpaddr )
  57. ),
  58. TP_fast_assign(
  59. __entry->flag_w = ((rflags & HPTE_R_PP) == 3) ? '-' : 'w';
  60. __entry->flag_x = (rflags & HPTE_R_N) ? '-' : 'x';
  61. __entry->eaddr = orig_pte->eaddr;
  62. __entry->hpteg = hpteg;
  63. __entry->va = va;
  64. __entry->vpage = orig_pte->vpage;
  65. __entry->hpaddr = hpaddr;
  66. ),
  67. TP_printk("KVM: %c%c Map 0x%lx: [%lx] 0x%lx (0x%llx) -> %lx",
  68. __entry->flag_w, __entry->flag_x, __entry->eaddr,
  69. __entry->hpteg, __entry->va, __entry->vpage, __entry->hpaddr)
  70. );
  71. #endif /* CONFIG_PPC_BOOK3S_64 */
  72. TRACE_EVENT(kvm_book3s_mmu_map,
  73. TP_PROTO(struct hpte_cache *pte),
  74. TP_ARGS(pte),
  75. TP_STRUCT__entry(
  76. __field( u64, host_vpn )
  77. __field( u64, pfn )
  78. __field( ulong, eaddr )
  79. __field( u64, vpage )
  80. __field( ulong, raddr )
  81. __field( int, flags )
  82. ),
  83. TP_fast_assign(
  84. __entry->host_vpn = pte->host_vpn;
  85. __entry->pfn = pte->pfn;
  86. __entry->eaddr = pte->pte.eaddr;
  87. __entry->vpage = pte->pte.vpage;
  88. __entry->raddr = pte->pte.raddr;
  89. __entry->flags = (pte->pte.may_read ? 0x4 : 0) |
  90. (pte->pte.may_write ? 0x2 : 0) |
  91. (pte->pte.may_execute ? 0x1 : 0);
  92. ),
  93. TP_printk("Map: hvpn=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]",
  94. __entry->host_vpn, __entry->pfn, __entry->eaddr,
  95. __entry->vpage, __entry->raddr, __entry->flags)
  96. );
  97. TRACE_EVENT(kvm_book3s_mmu_invalidate,
  98. TP_PROTO(struct hpte_cache *pte),
  99. TP_ARGS(pte),
  100. TP_STRUCT__entry(
  101. __field( u64, host_vpn )
  102. __field( u64, pfn )
  103. __field( ulong, eaddr )
  104. __field( u64, vpage )
  105. __field( ulong, raddr )
  106. __field( int, flags )
  107. ),
  108. TP_fast_assign(
  109. __entry->host_vpn = pte->host_vpn;
  110. __entry->pfn = pte->pfn;
  111. __entry->eaddr = pte->pte.eaddr;
  112. __entry->vpage = pte->pte.vpage;
  113. __entry->raddr = pte->pte.raddr;
  114. __entry->flags = (pte->pte.may_read ? 0x4 : 0) |
  115. (pte->pte.may_write ? 0x2 : 0) |
  116. (pte->pte.may_execute ? 0x1 : 0);
  117. ),
  118. TP_printk("Flush: hva=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]",
  119. __entry->host_vpn, __entry->pfn, __entry->eaddr,
  120. __entry->vpage, __entry->raddr, __entry->flags)
  121. );
  122. TRACE_EVENT(kvm_book3s_mmu_flush,
  123. TP_PROTO(const char *type, struct kvm_vcpu *vcpu, unsigned long long p1,
  124. unsigned long long p2),
  125. TP_ARGS(type, vcpu, p1, p2),
  126. TP_STRUCT__entry(
  127. __field( int, count )
  128. __field( unsigned long long, p1 )
  129. __field( unsigned long long, p2 )
  130. __field( const char *, type )
  131. ),
  132. TP_fast_assign(
  133. __entry->count = to_book3s(vcpu)->hpte_cache_count;
  134. __entry->p1 = p1;
  135. __entry->p2 = p2;
  136. __entry->type = type;
  137. ),
  138. TP_printk("Flush %d %sPTEs: %llx - %llx",
  139. __entry->count, __entry->type, __entry->p1, __entry->p2)
  140. );
  141. TRACE_EVENT(kvm_book3s_slb_found,
  142. TP_PROTO(unsigned long long gvsid, unsigned long long hvsid),
  143. TP_ARGS(gvsid, hvsid),
  144. TP_STRUCT__entry(
  145. __field( unsigned long long, gvsid )
  146. __field( unsigned long long, hvsid )
  147. ),
  148. TP_fast_assign(
  149. __entry->gvsid = gvsid;
  150. __entry->hvsid = hvsid;
  151. ),
  152. TP_printk("%llx -> %llx", __entry->gvsid, __entry->hvsid)
  153. );
  154. TRACE_EVENT(kvm_book3s_slb_fail,
  155. TP_PROTO(u16 sid_map_mask, unsigned long long gvsid),
  156. TP_ARGS(sid_map_mask, gvsid),
  157. TP_STRUCT__entry(
  158. __field( unsigned short, sid_map_mask )
  159. __field( unsigned long long, gvsid )
  160. ),
  161. TP_fast_assign(
  162. __entry->sid_map_mask = sid_map_mask;
  163. __entry->gvsid = gvsid;
  164. ),
  165. TP_printk("%x/%x: %llx", __entry->sid_map_mask,
  166. SID_MAP_MASK - __entry->sid_map_mask, __entry->gvsid)
  167. );
  168. TRACE_EVENT(kvm_book3s_slb_map,
  169. TP_PROTO(u16 sid_map_mask, unsigned long long gvsid,
  170. unsigned long long hvsid),
  171. TP_ARGS(sid_map_mask, gvsid, hvsid),
  172. TP_STRUCT__entry(
  173. __field( unsigned short, sid_map_mask )
  174. __field( unsigned long long, guest_vsid )
  175. __field( unsigned long long, host_vsid )
  176. ),
  177. TP_fast_assign(
  178. __entry->sid_map_mask = sid_map_mask;
  179. __entry->guest_vsid = gvsid;
  180. __entry->host_vsid = hvsid;
  181. ),
  182. TP_printk("%x: %llx -> %llx", __entry->sid_map_mask,
  183. __entry->guest_vsid, __entry->host_vsid)
  184. );
  185. TRACE_EVENT(kvm_book3s_slbmte,
  186. TP_PROTO(u64 slb_vsid, u64 slb_esid),
  187. TP_ARGS(slb_vsid, slb_esid),
  188. TP_STRUCT__entry(
  189. __field( u64, slb_vsid )
  190. __field( u64, slb_esid )
  191. ),
  192. TP_fast_assign(
  193. __entry->slb_vsid = slb_vsid;
  194. __entry->slb_esid = slb_esid;
  195. ),
  196. TP_printk("%llx, %llx", __entry->slb_vsid, __entry->slb_esid)
  197. );
  198. TRACE_EVENT(kvm_exit,
  199. TP_PROTO(unsigned int exit_nr, struct kvm_vcpu *vcpu),
  200. TP_ARGS(exit_nr, vcpu),
  201. TP_STRUCT__entry(
  202. __field( unsigned int, exit_nr )
  203. __field( unsigned long, pc )
  204. __field( unsigned long, msr )
  205. __field( unsigned long, dar )
  206. __field( unsigned long, srr1 )
  207. __field( unsigned long, last_inst )
  208. ),
  209. TP_fast_assign(
  210. __entry->exit_nr = exit_nr;
  211. __entry->pc = kvmppc_get_pc(vcpu);
  212. __entry->dar = kvmppc_get_fault_dar(vcpu);
  213. __entry->msr = vcpu->arch.shared->msr;
  214. __entry->srr1 = vcpu->arch.shadow_srr1;
  215. __entry->last_inst = vcpu->arch.last_inst;
  216. ),
  217. TP_printk("exit=%s"
  218. " | pc=0x%lx"
  219. " | msr=0x%lx"
  220. " | dar=0x%lx"
  221. " | srr1=0x%lx"
  222. " | last_inst=0x%lx"
  223. ,
  224. __print_symbolic(__entry->exit_nr, kvm_trace_symbol_exit),
  225. __entry->pc,
  226. __entry->msr,
  227. __entry->dar,
  228. __entry->srr1,
  229. __entry->last_inst
  230. )
  231. );
  232. TRACE_EVENT(kvm_unmap_hva,
  233. TP_PROTO(unsigned long hva),
  234. TP_ARGS(hva),
  235. TP_STRUCT__entry(
  236. __field( unsigned long, hva )
  237. ),
  238. TP_fast_assign(
  239. __entry->hva = hva;
  240. ),
  241. TP_printk("unmap hva 0x%lx\n", __entry->hva)
  242. );
  243. #endif /* _TRACE_KVM_H */
  244. /* This part must be outside protection */
  245. #include <trace/define_trace.h>