mmutrace.h

#if !defined(_TRACE_KVMMMU_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVMMMU_H

#include <linux/tracepoint.h>
#include <linux/ftrace_event.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvmmmu
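
/*
 * Helpers shared by the shadow-page tracepoints below (kvm_mmu_get_page,
 * kvm_mmu_sync_page, kvm_mmu_unsync_page, kvm_mmu_zap_page): the fields
 * recorded from a struct kvm_mmu_page and their assignment.
 */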
#define KVM_MMU_PAGE_FIELDS \
	__field(__u64, gfn) \
	__field(__u32, role) \
	__field(__u32, root_count) \
	__field(bool, unsync)

#define KVM_MMU_PAGE_ASSIGN(sp) \
	__entry->gfn = sp->gfn; \
	__entry->role = sp->role.word; \
	__entry->root_count = sp->root_count; \
	__entry->unsync = sp->unsync;
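
/*
 * Decode the packed role word recorded above into a human-readable summary
 * (level, quadrant, access bits, sync state, ...) for use in TP_printk().
 */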
#define KVM_MMU_PAGE_PRINTK() ({ \
	const char *ret = p->buffer + p->len; \
	static const char *access_str[] = { \
		"---", "--x", "w--", "w-x", "-u-", "-ux", "wu-", "wux" \
	}; \
	union kvm_mmu_page_role role; \
	\
	role.word = __entry->role; \
	\
	trace_seq_printf(p, "sp gfn %llx %u%s q%u%s %s%s" \
			 " %snxe root %u %s%c", \
			 __entry->gfn, role.level, \
			 role.cr4_pae ? " pae" : "", \
			 role.quadrant, \
			 role.direct ? " direct" : "", \
			 access_str[role.access], \
			 role.invalid ? " invalid" : "", \
			 role.nxe ? "" : "!", \
			 __entry->root_count, \
			 __entry->unsync ? "unsync" : "sync", 0); \
	ret; \
})
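
/*
 * Page-fault error code bits and the letters __print_flags() prints for them.
 */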
#define kvm_mmu_trace_pferr_flags \
	{ PFERR_PRESENT_MASK, "P" }, \
	{ PFERR_WRITE_MASK, "W" }, \
	{ PFERR_USER_MASK, "U" }, \
	{ PFERR_RSVD_MASK, "RSVD" }, \
	{ PFERR_FETCH_MASK, "F" }
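
/*
 * Each TRACE_EVENT() below expands into a trace_<name>() helper that the MMU
 * code calls at the matching point.  Illustrative call only (the real call
 * sites live in the MMU walker code, not in this header):
 *
 *	trace_kvm_mmu_pagetable_walk(addr, write_fault, user_fault, fetch_fault);
 */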

/*
 * A pagetable walk has started
 */
TRACE_EVENT(
	kvm_mmu_pagetable_walk,
	TP_PROTO(u64 addr, int write_fault, int user_fault, int fetch_fault),
	TP_ARGS(addr, write_fault, user_fault, fetch_fault),

	TP_STRUCT__entry(
		__field(__u64, addr)
		__field(__u32, pferr)
	),

	TP_fast_assign(
		__entry->addr = addr;
		__entry->pferr = (!!write_fault << 1) | (!!user_fault << 2)
				 | (!!fetch_fault << 4);
	),

	TP_printk("addr %llx pferr %x %s", __entry->addr, __entry->pferr,
		  __print_flags(__entry->pferr, "|", kvm_mmu_trace_pferr_flags))
);
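
/*
 * Example (illustrative, assuming the usual x86 PFERR_* bit layout): a
 * user-mode write fault records pferr = (1 << 1) | (1 << 2) = 6, which
 * TP_printk() renders as "addr <addr> pferr 6 W|U".
 */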

/* We just walked a paging element */
TRACE_EVENT(
	kvm_mmu_paging_element,
	TP_PROTO(u64 pte, int level),
	TP_ARGS(pte, level),

	TP_STRUCT__entry(
		__field(__u64, pte)
		__field(__u32, level)
	),

	TP_fast_assign(
		__entry->pte = pte;
		__entry->level = level;
	),

	TP_printk("pte %llx level %u", __entry->pte, __entry->level)
);

/* We set a pte accessed bit */
TRACE_EVENT(
	kvm_mmu_set_accessed_bit,
	TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),
	TP_ARGS(table_gfn, index, size),

	TP_STRUCT__entry(
		__field(__u64, gpa)
	),

	TP_fast_assign(
		__entry->gpa = ((u64)table_gfn << PAGE_SHIFT)
				+ index * size;
	),

	TP_printk("gpa %llx", __entry->gpa)
);

/* We set a pte dirty bit */
TRACE_EVENT(
	kvm_mmu_set_dirty_bit,
	TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),
	TP_ARGS(table_gfn, index, size),

	TP_STRUCT__entry(
		__field(__u64, gpa)
	),

	TP_fast_assign(
		__entry->gpa = ((u64)table_gfn << PAGE_SHIFT)
				+ index * size;
	),

	TP_printk("gpa %llx", __entry->gpa)
);
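
/* The guest page-table walk failed; pferr holds the resulting fault error code */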
TRACE_EVENT(
	kvm_mmu_walker_error,
	TP_PROTO(u32 pferr),
	TP_ARGS(pferr),

	TP_STRUCT__entry(
		__field(__u32, pferr)
	),

	TP_fast_assign(
		__entry->pferr = pferr;
	),

	TP_printk("pferr %x %s", __entry->pferr,
		  __print_flags(__entry->pferr, "|", kvm_mmu_trace_pferr_flags))
);
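
/* A shadow page was looked up; 'created' says whether it had to be allocated */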
TRACE_EVENT(
	kvm_mmu_get_page,
	TP_PROTO(struct kvm_mmu_page *sp, bool created),
	TP_ARGS(sp, created),

	TP_STRUCT__entry(
		KVM_MMU_PAGE_FIELDS
		__field(bool, created)
	),

	TP_fast_assign(
		KVM_MMU_PAGE_ASSIGN(sp)
		__entry->created = created;
	),

	TP_printk("%s %s", KVM_MMU_PAGE_PRINTK(),
		  __entry->created ? "new" : "existing")
);
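
/* An unsync shadow page is being brought back in sync with the guest page table */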
TRACE_EVENT(
	kvm_mmu_sync_page,
	TP_PROTO(struct kvm_mmu_page *sp),
	TP_ARGS(sp),

	TP_STRUCT__entry(
		KVM_MMU_PAGE_FIELDS
	),

	TP_fast_assign(
		KVM_MMU_PAGE_ASSIGN(sp)
	),

	TP_printk("%s", KVM_MMU_PAGE_PRINTK())
);
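
/* A shadow page is being marked unsync (allowed to go stale until the next sync) */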
TRACE_EVENT(
	kvm_mmu_unsync_page,
	TP_PROTO(struct kvm_mmu_page *sp),
	TP_ARGS(sp),

	TP_STRUCT__entry(
		KVM_MMU_PAGE_FIELDS
	),

	TP_fast_assign(
		KVM_MMU_PAGE_ASSIGN(sp)
	),

	TP_printk("%s", KVM_MMU_PAGE_PRINTK())
);
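
/* A shadow page is being zapped (torn down and removed from the shadow MMU) */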
TRACE_EVENT(
	kvm_mmu_zap_page,
	TP_PROTO(struct kvm_mmu_page *sp),
	TP_ARGS(sp),

	TP_STRUCT__entry(
		KVM_MMU_PAGE_FIELDS
	),

	TP_fast_assign(
		KVM_MMU_PAGE_ASSIGN(sp)
	),

	TP_printk("%s", KVM_MMU_PAGE_PRINTK())
);

#endif /* _TRACE_KVMMMU_H */

#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE mmutrace

/* This part must be outside protection */
#include <trace/define_trace.h>