/* perf_event.h */
#ifndef _ASM_X86_PERF_EVENT_H
#define _ASM_X86_PERF_EVENT_H

/*
 * Performance event hw details:
 */

#define INTEL_PMC_MAX_GENERIC                  32
#define INTEL_PMC_MAX_FIXED                     3
#define INTEL_PMC_IDX_FIXED                    32

#define X86_PMC_IDX_MAX                        64

#define MSR_ARCH_PERFMON_PERFCTR0              0xc1
#define MSR_ARCH_PERFMON_PERFCTR1              0xc2

#define MSR_ARCH_PERFMON_EVENTSEL0             0x186
#define MSR_ARCH_PERFMON_EVENTSEL1             0x187

#define ARCH_PERFMON_EVENTSEL_EVENT            0x000000FFULL
#define ARCH_PERFMON_EVENTSEL_UMASK            0x0000FF00ULL
#define ARCH_PERFMON_EVENTSEL_USR              (1ULL << 16)
#define ARCH_PERFMON_EVENTSEL_OS               (1ULL << 17)
#define ARCH_PERFMON_EVENTSEL_EDGE             (1ULL << 18)
#define ARCH_PERFMON_EVENTSEL_PIN_CONTROL      (1ULL << 19)
#define ARCH_PERFMON_EVENTSEL_INT              (1ULL << 20)
#define ARCH_PERFMON_EVENTSEL_ANY              (1ULL << 21)
#define ARCH_PERFMON_EVENTSEL_ENABLE           (1ULL << 22)
#define ARCH_PERFMON_EVENTSEL_INV              (1ULL << 23)
#define ARCH_PERFMON_EVENTSEL_CMASK            0xFF000000ULL

#define AMD64_EVENTSEL_INT_CORE_ENABLE         (1ULL << 36)
#define AMD64_EVENTSEL_GUESTONLY               (1ULL << 40)
#define AMD64_EVENTSEL_HOSTONLY                (1ULL << 41)

#define AMD64_EVENTSEL_INT_CORE_SEL_SHIFT      37
#define AMD64_EVENTSEL_INT_CORE_SEL_MASK \
        (0xFULL << AMD64_EVENTSEL_INT_CORE_SEL_SHIFT)

#define AMD64_EVENTSEL_EVENT \
        (ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
#define INTEL_ARCH_EVENT_MASK \
        (ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)

#define X86_RAW_EVENT_MASK \
        (ARCH_PERFMON_EVENTSEL_EVENT | \
         ARCH_PERFMON_EVENTSEL_UMASK | \
         ARCH_PERFMON_EVENTSEL_EDGE  | \
         ARCH_PERFMON_EVENTSEL_INV   | \
         ARCH_PERFMON_EVENTSEL_CMASK)
#define AMD64_RAW_EVENT_MASK \
        (X86_RAW_EVENT_MASK | \
         AMD64_EVENTSEL_EVENT)
#define AMD64_RAW_EVENT_MASK_NB \
        (AMD64_EVENTSEL_EVENT | \
         ARCH_PERFMON_EVENTSEL_UMASK)

#define AMD64_NUM_COUNTERS                     4
#define AMD64_NUM_COUNTERS_CORE                6
#define AMD64_NUM_COUNTERS_NB                  4

#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL          0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK        (0x00 << 8)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX        0
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
        (1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))

#define ARCH_PERFMON_BRANCH_MISSES_RETIRED     6
#define ARCH_PERFMON_EVENTS_COUNT              7
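
/*
 * Illustrative sketch, not part of the original header: how the
 * ARCH_PERFMON_EVENTSEL_* bits above compose into a raw event-select
 * value. This encodes the architectural "unhalted core cycles" event,
 * counting in both user and kernel mode with the counter enabled. The
 * helper name is made up, and u64 is assumed from <linux/types.h>.
 */
static inline u64 x86_eventsel_core_cycles_example(void)
{
        return (ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL &
                ARCH_PERFMON_EVENTSEL_EVENT)           |
               ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK |
               ARCH_PERFMON_EVENTSEL_USR               |
               ARCH_PERFMON_EVENTSEL_OS                |
               ARCH_PERFMON_EVENTSEL_ENABLE;
}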

/*
 * Intel "Architectural Performance Monitoring" CPUID
 * detection/enumeration details:
 */
union cpuid10_eax {
        struct {
                unsigned int version_id:8;
                unsigned int num_counters:8;
                unsigned int bit_width:8;
                unsigned int mask_length:8;
        } split;
        unsigned int full;
};

union cpuid10_ebx {
        struct {
                unsigned int no_unhalted_core_cycles:1;
                unsigned int no_instructions_retired:1;
                unsigned int no_unhalted_reference_cycles:1;
                unsigned int no_llc_reference:1;
                unsigned int no_llc_misses:1;
                unsigned int no_branch_instruction_retired:1;
                unsigned int no_branch_misses_retired:1;
        } split;
        unsigned int full;
};

union cpuid10_edx {
        struct {
                unsigned int num_counters_fixed:5;
                unsigned int bit_width_fixed:8;
                unsigned int reserved:19;
        } split;
        unsigned int full;
};

struct x86_pmu_capability {
        int             version;
        int             num_counters_gp;
        int             num_counters_fixed;
        int             bit_width_gp;
        int             bit_width_fixed;
        unsigned int    events_mask;
        int             events_mask_len;
};
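
/*
 * Illustrative sketch, not part of the original header: how the
 * cpuid10_* unions above decode CPUID leaf 0xa into x86_pmu_capability.
 * It assumes the kernel's cpuid() helper from <asm/processor.h>; the
 * in-tree equivalent of this is perf_get_x86_pmu_capability(), declared
 * further below, and the helper name here is made up.
 */
static inline void fill_pmu_capability_example(struct x86_pmu_capability *cap)
{
        union cpuid10_eax eax;
        union cpuid10_edx edx;
        unsigned int ebx, ecx;

        cpuid(0xa, &eax.full, &ebx, &ecx, &edx.full);

        cap->version            = eax.split.version_id;
        cap->num_counters_gp    = eax.split.num_counters;
        cap->bit_width_gp       = eax.split.bit_width;
        cap->events_mask_len    = eax.split.mask_length;
        cap->events_mask        = ebx;
        cap->num_counters_fixed = edx.split.num_counters_fixed;
        cap->bit_width_fixed    = edx.split.bit_width_fixed;
}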

/*
 * Fixed-purpose performance events:
 */

/*
 * All 3 fixed-mode PMCs are configured via this single MSR:
 */
#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL        0x38d

/*
 * The counts are available in three separate MSRs:
 */

/* Instr_Retired.Any: */
#define MSR_ARCH_PERFMON_FIXED_CTR0            0x309
#define INTEL_PMC_IDX_FIXED_INSTRUCTIONS       (INTEL_PMC_IDX_FIXED + 0)

/* CPU_CLK_Unhalted.Core: */
#define MSR_ARCH_PERFMON_FIXED_CTR1            0x30a
#define INTEL_PMC_IDX_FIXED_CPU_CYCLES         (INTEL_PMC_IDX_FIXED + 1)

/* CPU_CLK_Unhalted.Ref: */
#define MSR_ARCH_PERFMON_FIXED_CTR2            0x30b
#define INTEL_PMC_IDX_FIXED_REF_CYCLES         (INTEL_PMC_IDX_FIXED + 2)
#define INTEL_PMC_MSK_FIXED_REF_CYCLES         (1ULL << INTEL_PMC_IDX_FIXED_REF_CYCLES)
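
/*
 * Illustrative sketch, not part of the original header: each fixed
 * counter owns a 4-bit field in MSR_ARCH_PERFMON_FIXED_CTR_CTRL. Per
 * the Intel SDM, bit 0 of that field enables ring-0 counting, bit 1
 * ring-3 counting, and bit 3 the PMI on overflow. The helper name is
 * made up.
 */
static inline u64 fixed_ctr_ctrl_bits_example(int idx)
{
        /* count in kernel and user mode, raise a PMI on overflow */
        return (0x1ULL | 0x2ULL | 0x8ULL) << (idx * 4);
}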

/*
 * We model BTS tracing as another fixed-mode PMC.
 *
 * We choose a value in the middle of the fixed event range, since lower
 * values are used by actual fixed events and higher values are used
 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
 */
#define INTEL_PMC_IDX_FIXED_BTS                (INTEL_PMC_IDX_FIXED + 16)

/*
 * IBS cpuid feature detection
 */
#define IBS_CPUID_FEATURES             0x8000001b

/*
 * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but
 * bit 0 is used to indicate the existence of IBS.
 */
#define IBS_CAPS_AVAIL                 (1U<<0)
#define IBS_CAPS_FETCHSAM              (1U<<1)
#define IBS_CAPS_OPSAM                 (1U<<2)
#define IBS_CAPS_RDWROPCNT             (1U<<3)
#define IBS_CAPS_OPCNT                 (1U<<4)
#define IBS_CAPS_BRNTRGT               (1U<<5)
#define IBS_CAPS_OPCNTEXT              (1U<<6)
#define IBS_CAPS_RIPINVALIDCHK         (1U<<7)

#define IBS_CAPS_DEFAULT               (IBS_CAPS_AVAIL         \
                                        | IBS_CAPS_FETCHSAM    \
                                        | IBS_CAPS_OPSAM)

/*
 * IBS APIC setup
 */
#define IBSCTL                         0x1cc
#define IBSCTL_LVT_OFFSET_VALID        (1ULL<<8)
#define IBSCTL_LVT_OFFSET_MASK         0x0F

/* ibs fetch bits/masks */
#define IBS_FETCH_RAND_EN      (1ULL<<57)
#define IBS_FETCH_VAL          (1ULL<<49)
#define IBS_FETCH_ENABLE       (1ULL<<48)
#define IBS_FETCH_CNT          0xFFFF0000ULL
#define IBS_FETCH_MAX_CNT      0x0000FFFFULL

/* ibs op bits/masks */
/* lower 4 bits of the current count are ignored: */
#define IBS_OP_CUR_CNT         (0xFFFF0ULL<<32)
#define IBS_OP_CNT_CTL         (1ULL<<19)
#define IBS_OP_VAL             (1ULL<<18)
#define IBS_OP_ENABLE          (1ULL<<17)
#define IBS_OP_MAX_CNT         0x0000FFFFULL
#define IBS_OP_MAX_CNT_EXT     0x007FFFFFULL   /* not a register bit mask */
#define IBS_RIP_INVALID        (1ULL<<38)

#ifdef CONFIG_X86_LOCAL_APIC
extern u32 get_ibs_caps(void);
#else
static inline u32 get_ibs_caps(void) { return 0; }
#endif
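
/*
 * Usage sketch, not part of the original header: callers test
 * individual IBS_CAPS_* bits in the value returned by get_ibs_caps();
 * a return of 0 means IBS is absent entirely. The helper name is made
 * up, and bool is assumed from <linux/types.h>.
 */
static inline bool ibs_op_sampling_supported_example(void)
{
        return get_ibs_caps() & IBS_CAPS_OPSAM;
}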

#ifdef CONFIG_PERF_EVENTS
extern void perf_events_lapic_init(void);

/*
 * Abuse bits {3,5} of the cpu eflags register. These flags are otherwise
 * unused and ABI specified to be 0, so nobody should care what we do with
 * them.
 *
 * EXACT - the IP points to the exact instruction that triggered the
 *         event (HW bugs exempt).
 * VM    - original X86_VM_MASK; see set_linear_ip().
 */
#define PERF_EFLAGS_EXACT      (1UL << 3)
#define PERF_EFLAGS_VM         (1UL << 5)

struct pt_regs;
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
extern unsigned long perf_misc_flags(struct pt_regs *regs);
#define perf_misc_flags(regs)  perf_misc_flags(regs)

#include <asm/stacktrace.h>

/*
 * We abuse bit 3 from flags to pass exact information, see perf_misc_flags
 * and the comment with PERF_EFLAGS_EXACT.
 */
#define perf_arch_fetch_caller_regs(regs, __ip)        {       \
        (regs)->ip = (__ip);                                   \
        (regs)->bp = caller_frame_pointer();                   \
        (regs)->cs = __KERNEL_CS;                              \
        (regs)->flags = 0;                                     \
        asm volatile(                                          \
                _ASM_MOV "%%"_ASM_SP ", %0\n"                  \
                : "=m" ((regs)->sp)                            \
                :: "memory"                                    \
        );                                                     \
}
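
/*
 * Usage sketch, not part of the original header: a caller snapshots its
 * own registers for perf like this. It assumes the full struct pt_regs
 * definition from <asm/ptrace.h> and _THIS_IP_ from <linux/kernel.h>;
 * the function name is made up.
 */
static inline void fetch_my_regs_example(struct pt_regs *regs)
{
        perf_arch_fetch_caller_regs(regs, _THIS_IP_);
}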

struct perf_guest_switch_msr {
        unsigned msr;
        u64 host, guest;
};

extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr);
extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
extern void perf_check_microcode(void);
#else
static inline struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
{
        *nr = 0;
        return NULL;
}

static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
{
        memset(cap, 0, sizeof(*cap));
}

static inline void perf_events_lapic_init(void) { }
static inline void perf_check_microcode(void) { }
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
extern void amd_pmu_enable_virt(void);
extern void amd_pmu_disable_virt(void);
#else
static inline void amd_pmu_enable_virt(void) { }
static inline void amd_pmu_disable_virt(void) { }
#endif
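
/*
 * Illustrative sketch, not part of the original header: a guest-entry
 * path (KVM-style) walks the array returned by perf_guest_get_msrs()
 * and loads each guest value on entry, restoring the host values on
 * exit. It assumes wrmsrl() from <asm/msr.h>; the helper name is made
 * up. With CONFIG_PERF_EVENTS disabled, the stub above returns NULL
 * with *nr == 0 and the loop simply never runs.
 */
static inline void switch_pmu_msrs_example(bool entering_guest)
{
        struct perf_guest_switch_msr *msrs;
        int i, nr;

        msrs = perf_guest_get_msrs(&nr);
        for (i = 0; i < nr; i++)
                wrmsrl(msrs[i].msr,
                       entering_guest ? msrs[i].guest : msrs[i].host);
}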

#define arch_perf_out_copy_user copy_from_user_nmi

#endif /* _ASM_X86_PERF_EVENT_H */