#ifndef _ASM_X86_PERF_EVENT_H
#define _ASM_X86_PERF_EVENT_H

/*
 * Performance event hw details:
 */

#define INTEL_PMC_MAX_GENERIC	32
#define INTEL_PMC_MAX_FIXED	3
#define INTEL_PMC_IDX_FIXED	32

#define X86_PMC_IDX_MAX		64
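
/*
 * Example (illustrative sketch, not part of the original header,
 * assuming DECLARE_BITMAP()/set_bit() from <linux/bitmap.h>): the
 * generic and fixed counters share one index space of X86_PMC_IDX_MAX
 * bits, with the fixed counters starting at bit INTEL_PMC_IDX_FIXED,
 * so a single bitmap can track both kinds. Marking generic PMC0 and
 * fixed counter 1 as used:
 *
 *	DECLARE_BITMAP(used_mask, X86_PMC_IDX_MAX);
 *
 *	set_bit(0, used_mask);
 *	set_bit(INTEL_PMC_IDX_FIXED + 1, used_mask);
 */
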
#define MSR_ARCH_PERFMON_PERFCTR0	0xc1
#define MSR_ARCH_PERFMON_PERFCTR1	0xc2

#define MSR_ARCH_PERFMON_EVENTSEL0	0x186
#define MSR_ARCH_PERFMON_EVENTSEL1	0x187

#define ARCH_PERFMON_EVENTSEL_EVENT	0x000000FFULL
#define ARCH_PERFMON_EVENTSEL_UMASK	0x0000FF00ULL
#define ARCH_PERFMON_EVENTSEL_USR	(1ULL << 16)
#define ARCH_PERFMON_EVENTSEL_OS	(1ULL << 17)
#define ARCH_PERFMON_EVENTSEL_EDGE	(1ULL << 18)
#define ARCH_PERFMON_EVENTSEL_PIN_CONTROL	(1ULL << 19)
#define ARCH_PERFMON_EVENTSEL_INT	(1ULL << 20)
#define ARCH_PERFMON_EVENTSEL_ANY	(1ULL << 21)
#define ARCH_PERFMON_EVENTSEL_ENABLE	(1ULL << 22)
#define ARCH_PERFMON_EVENTSEL_INV	(1ULL << 23)
#define ARCH_PERFMON_EVENTSEL_CMASK	0xFF000000ULL

#define AMD_PERFMON_EVENTSEL_GUESTONLY	(1ULL << 40)
#define AMD_PERFMON_EVENTSEL_HOSTONLY	(1ULL << 41)

#define AMD64_EVENTSEL_EVENT	\
	(ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))

#define INTEL_ARCH_EVENT_MASK	\
	(ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)

#define X86_RAW_EVENT_MASK		\
	(ARCH_PERFMON_EVENTSEL_EVENT |	\
	 ARCH_PERFMON_EVENTSEL_UMASK |	\
	 ARCH_PERFMON_EVENTSEL_EDGE  |	\
	 ARCH_PERFMON_EVENTSEL_INV   |	\
	 ARCH_PERFMON_EVENTSEL_CMASK)
#define AMD64_RAW_EVENT_MASK		\
	(X86_RAW_EVENT_MASK          |	\
	 AMD64_EVENTSEL_EVENT)
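
/*
 * Example (illustrative sketch, not part of the original header,
 * assuming wrmsrl() from <asm/msr.h> and a caller-supplied raw config
 * in "attr_config"): a raw event config is typically filtered through
 * X86_RAW_EVENT_MASK so that only the event, umask, edge, invert and
 * cmask fields pass through, while the privilege and enable bits stay
 * under kernel control:
 *
 *	u64 config = attr_config & X86_RAW_EVENT_MASK;
 *
 *	config |= ARCH_PERFMON_EVENTSEL_USR |
 *		  ARCH_PERFMON_EVENTSEL_OS  |
 *		  ARCH_PERFMON_EVENTSEL_ENABLE;
 *	wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0, config);
 */
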
#define AMD64_NUM_COUNTERS		4
#define AMD64_NUM_COUNTERS_CORE		6

#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL		0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK		(0x00 << 8)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX		0
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT	\
		(1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))

#define ARCH_PERFMON_BRANCH_MISSES_RETIRED	6
#define ARCH_PERFMON_EVENTS_COUNT		7

/*
 * Intel "Architectural Performance Monitoring" CPUID
 * detection/enumeration details:
 */
union cpuid10_eax {
	struct {
		unsigned int version_id:8;
		unsigned int num_counters:8;
		unsigned int bit_width:8;
		unsigned int mask_length:8;
	} split;
	unsigned int full;
};

union cpuid10_ebx {
	struct {
		unsigned int no_unhalted_core_cycles:1;
		unsigned int no_instructions_retired:1;
		unsigned int no_unhalted_reference_cycles:1;
		unsigned int no_llc_reference:1;
		unsigned int no_llc_misses:1;
		unsigned int no_branch_instruction_retired:1;
		unsigned int no_branch_misses_retired:1;
	} split;
	unsigned int full;
};

union cpuid10_edx {
	struct {
		unsigned int num_counters_fixed:5;
		unsigned int bit_width_fixed:8;
		unsigned int reserved:19;
	} split;
	unsigned int full;
};

struct x86_pmu_capability {
	int		version;
	int		num_counters_gp;
	int		num_counters_fixed;
	int		bit_width_gp;
	int		bit_width_fixed;
	unsigned int	events_mask;
	int		events_mask_len;
};
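
/*
 * Example (illustrative sketch, not part of the original header,
 * assuming cpuid() from <asm/processor.h>): the unions above map
 * directly onto CPUID leaf 0xa. Querying the leaf and checking that
 * the unhalted-core-cycles event is actually implemented (a set bit
 * in EBX means the event is *not* available):
 *
 *	union cpuid10_eax eax;
 *	union cpuid10_ebx ebx;
 *	union cpuid10_edx edx;
 *	unsigned int unused;
 *	bool cycles;
 *
 *	cpuid(0xa, &eax.full, &ebx.full, &unused, &edx.full);
 *	cycles = eax.split.mask_length >
 *			ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX &&
 *		 !(ebx.full & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT);
 */
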
/*
 * Fixed-purpose performance events:
 */

/*
 * All 3 fixed-mode PMCs are configured via this single MSR:
 */
#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL	0x38d

/*
 * The counts are available in three separate MSRs:
 */

/* Instr_Retired.Any: */
#define MSR_ARCH_PERFMON_FIXED_CTR0	0x309
#define INTEL_PMC_IDX_FIXED_INSTRUCTIONS	(INTEL_PMC_IDX_FIXED + 0)

/* CPU_CLK_Unhalted.Core: */
#define MSR_ARCH_PERFMON_FIXED_CTR1	0x30a
#define INTEL_PMC_IDX_FIXED_CPU_CYCLES	(INTEL_PMC_IDX_FIXED + 1)

/* CPU_CLK_Unhalted.Ref: */
#define MSR_ARCH_PERFMON_FIXED_CTR2	0x30b
#define INTEL_PMC_IDX_FIXED_REF_CYCLES	(INTEL_PMC_IDX_FIXED + 2)
#define INTEL_PMC_MSK_FIXED_REF_CYCLES	(1ULL << INTEL_PMC_IDX_FIXED_REF_CYCLES)
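
/*
 * Example (illustrative sketch, not part of the original header,
 * assuming rdmsrl()/wrmsrl() from <asm/msr.h> and the architectural
 * field layout from Intel's SDM): each fixed counter owns a 4-bit
 * field in MSR_ARCH_PERFMON_FIXED_CTR_CTRL; within the field, bit 0
 * enables counting in ring 0, bit 1 in ring 3, and bit 3 requests a
 * PMI on overflow. Enabling fixed counter 1 (core cycles) for both
 * kernel and user mode:
 *
 *	u64 ctrl;
 *	int idx = INTEL_PMC_IDX_FIXED_CPU_CYCLES - INTEL_PMC_IDX_FIXED;
 *
 *	rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, ctrl);
 *	ctrl |= 0x3ULL << (4 * idx);
 *	wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, ctrl);
 */
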
/*
 * We model BTS tracing as another fixed-mode PMC.
 *
 * We choose a value in the middle of the fixed event range, since lower
 * values are used by actual fixed events and higher values are used
 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
 */
#define INTEL_PMC_IDX_FIXED_BTS	(INTEL_PMC_IDX_FIXED + 16)

/*
 * IBS cpuid feature detection
 */
#define IBS_CPUID_FEATURES	0x8000001b

/*
 * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but
 * bit 0 is used to indicate the existence of IBS.
 */
#define IBS_CAPS_AVAIL		(1U<<0)
#define IBS_CAPS_FETCHSAM	(1U<<1)
#define IBS_CAPS_OPSAM		(1U<<2)
#define IBS_CAPS_RDWROPCNT	(1U<<3)
#define IBS_CAPS_OPCNT		(1U<<4)
#define IBS_CAPS_BRNTRGT	(1U<<5)
#define IBS_CAPS_OPCNTEXT	(1U<<6)
#define IBS_CAPS_RIPINVALIDCHK	(1U<<7)

#define IBS_CAPS_DEFAULT	(IBS_CAPS_AVAIL		\
				 | IBS_CAPS_FETCHSAM	\
				 | IBS_CAPS_OPSAM)

/*
 * IBS APIC setup
 */
#define IBSCTL			0x1cc
#define IBSCTL_LVT_OFFSET_VALID	(1ULL<<8)
#define IBSCTL_LVT_OFFSET_MASK	0x0F

/* ibs fetch bits/masks */
#define IBS_FETCH_RAND_EN	(1ULL<<57)
#define IBS_FETCH_VAL		(1ULL<<49)
#define IBS_FETCH_ENABLE	(1ULL<<48)
#define IBS_FETCH_CNT		0xFFFF0000ULL
#define IBS_FETCH_MAX_CNT	0x0000FFFFULL

/* ibs op bits/masks */
/* lower 4 bits of the current count are ignored: */
#define IBS_OP_CUR_CNT		(0xFFFF0ULL<<32)
#define IBS_OP_CNT_CTL		(1ULL<<19)
#define IBS_OP_VAL		(1ULL<<18)
#define IBS_OP_ENABLE		(1ULL<<17)
#define IBS_OP_MAX_CNT		0x0000FFFFULL
#define IBS_OP_MAX_CNT_EXT	0x007FFFFFULL	/* not a register bit mask */
#define IBS_RIP_INVALID		(1ULL<<38)
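
/*
 * Example (illustrative sketch, not part of the original header,
 * assuming wrmsrl() from <asm/msr.h> and MSR_AMD64_IBSOPCTL from
 * <asm/msr-index.h>): the op MaxCnt field stores the sampling period
 * with the low 4 bits dropped, so the period must be a multiple of 16.
 * Starting op sampling with a period of 64k ops:
 *
 *	u64 period = 0x10000;
 *	u64 config = (period >> 4) & IBS_OP_MAX_CNT;
 *
 *	config |= IBS_OP_ENABLE;
 *	wrmsrl(MSR_AMD64_IBSOPCTL, config);
 */
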
#ifdef CONFIG_X86_LOCAL_APIC
extern u32 get_ibs_caps(void);
#else
static inline u32 get_ibs_caps(void) { return 0; }
#endif
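
/*
 * Example (illustrative sketch, not part of the original header): the
 * capability word should be consulted before any IBS MSR is touched,
 * e.g. before enabling op sampling as sketched above:
 *
 *	u32 caps = get_ibs_caps();
 *
 *	if (!(caps & IBS_CAPS_OPSAM))
 *		return -ENODEV;
 */
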
#ifdef CONFIG_PERF_EVENTS
extern void perf_events_lapic_init(void);

/*
 * Abuse bits {3,5} of the cpu eflags register. These flags are otherwise
 * unused and ABI specified to be 0, so nobody should care what we do with
 * them.
 *
 * EXACT - the IP points to the exact instruction that triggered the
 *         event (HW bugs exempt).
 * VM    - original X86_VM_MASK; see set_linear_ip().
 */
#define PERF_EFLAGS_EXACT	(1UL << 3)
#define PERF_EFLAGS_VM		(1UL << 5)
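
/*
 * Example (illustrative sketch, not part of the original header,
 * assuming PERF_RECORD_MISC_EXACT_IP from the core perf ABI headers):
 * perf_misc_flags() can translate the borrowed eflags bit back into
 * the ABI-visible sample flag:
 *
 *	if (regs->flags & PERF_EFLAGS_EXACT)
 *		misc |= PERF_RECORD_MISC_EXACT_IP;
 */
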
struct pt_regs;
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
extern unsigned long perf_misc_flags(struct pt_regs *regs);
#define perf_misc_flags(regs)	perf_misc_flags(regs)

#include <asm/stacktrace.h>

/*
 * We abuse bit 3 from flags to pass exact information, see perf_misc_flags
 * and the comment with PERF_EFLAGS_EXACT.
 */
#define perf_arch_fetch_caller_regs(regs, __ip)	{	\
	(regs)->ip = (__ip);				\
	(regs)->bp = caller_frame_pointer();		\
	(regs)->cs = __KERNEL_CS;			\
	(regs)->flags = 0;				\
	asm volatile(					\
		_ASM_MOV "%%"_ASM_SP ", %0\n"		\
		: "=m" ((regs)->sp)			\
		:: "memory"				\
	);						\
}

struct perf_guest_switch_msr {
	unsigned msr;
	u64 host, guest;
};

extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr);
extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
extern void perf_check_microcode(void);
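
/*
 * Example (illustrative sketch, not part of the original header,
 * assuming wrmsrl() from <asm/msr.h>): a hypervisor can walk the list
 * returned by perf_guest_get_msrs() to load guest PMU state before
 * VM-entry, and write back the .host values on VM-exit:
 *
 *	int i, nr;
 *	struct perf_guest_switch_msr *msrs = perf_guest_get_msrs(&nr);
 *
 *	for (i = 0; i < nr; i++)
 *		wrmsrl(msrs[i].msr, msrs[i].guest);
 */
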
#else
static inline struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
{
	*nr = 0;
	return NULL;
}

static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
{
	memset(cap, 0, sizeof(*cap));
}

static inline void perf_events_lapic_init(void) { }
static inline void perf_check_microcode(void) { }
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
extern void amd_pmu_enable_virt(void);
extern void amd_pmu_disable_virt(void);
#else
static inline void amd_pmu_enable_virt(void) { }
static inline void amd_pmu_disable_virt(void) { }
#endif

#define arch_perf_out_copy_user copy_from_user_nmi

#endif /* _ASM_X86_PERF_EVENT_H */