perf_event_intel.c

#ifdef CONFIG_CPU_SUP_INTEL

/*
 * Intel PerfMon, used on Core and later.
 */
static const u64 intel_perfmon_event_map[] =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x412e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
	[PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
};
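
/*
 * The values above follow the architectural PERFEVTSEL encoding: unit
 * mask in bits 15:8, event select in bits 7:0. 0x412e, for instance,
 * is event 0x2e with umask 0x41 (LLC Misses).
 */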

static struct event_constraint intel_core_event_constraints[] =
{
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
	EVENT_CONSTRAINT_END
};
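
/*
 * The second argument of INTEL_EVENT_CONSTRAINT() is a counter bitmask:
 * 0x1 restricts the event to PMC0, 0x2 to PMC1, 0x3 allows either.
 * FIXED_EVENT_CONSTRAINT() pins an event onto the named fixed counter.
 */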

static struct event_constraint intel_core2_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	/*
	 * Core2 has Fixed Counter 2 listed as CPU_CLK_UNHALTED.REF and event
	 * 0x013c as CPU_CLK_UNHALTED.BUS and specifies there is a fixed
	 * ratio between these counters.
	 */
	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_nehalem_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
	INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
	INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
	INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
	INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_westmere_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_gen_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
	EVENT_CONSTRAINT_END
};

static u64 intel_pmu_event_map(int hw_event)
{
	return intel_perfmon_event_map[hw_event];
}
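
/*
 * In the hw_cache_event_ids tables below, -1 marks an operation the
 * hardware cannot count at all and 0 marks one for which no suitable
 * event exists; anything else is a raw umask/event-select encoding.
 */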

static __initconst const u64 westmere_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
		[ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
		[ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
		[ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
};

static __initconst const u64 nehalem_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
		[ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
		[ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
		[ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
};

static __initconst const u64 core2_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
		[ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2a, /* L2_ST.MESI */
		[ C(RESULT_MISS) ] = 0x412a, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0208, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
};

static __initconst const u64 atom_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
		[ C(RESULT_MISS) ] = 0,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
		[ C(RESULT_MISS) ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2a, /* L2_ST.MESI */
		[ C(RESULT_MISS) ] = 0x412a, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
};

static void intel_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
		intel_pmu_disable_bts();

	intel_pmu_pebs_disable_all();
	intel_pmu_lbr_disable_all();
}

static void intel_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	intel_pmu_pebs_enable_all();
	intel_pmu_lbr_enable_all();
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);

	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
		struct perf_event *event =
			cpuc->events[X86_PMC_IDX_FIXED_BTS];

		if (WARN_ON_ONCE(!event))
			return;

		intel_pmu_enable_bts(event->hw.config);
	}
}

/*
 * Workaround for:
 *   Intel Errata AAK100 (model 26)
 *   Intel Errata AAP53  (model 30)
 *   Intel Errata BD53   (model 44)
 *
 * The official story:
 *   These chips need to be 'reset' when adding counters by programming the
 *   magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
 *   in sequence on the same PMC or on different PMCs.
 *
 * In practice it appears some of these events do in fact count, and
 * we need to program all 4 events.
 */
static void intel_pmu_nhm_workaround(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	static const unsigned long nhm_magic[4] = {
		0x4300B5,
		0x4300D2,
		0x4300B1,
		0x4300B1
	};
	struct perf_event *event;
	int i;

	/*
	 * The Errata requires below steps:
	 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
	 * 2) Configure 4 PERFEVTSELx with the magic events and clear
	 *    the corresponding PMCx;
	 * 3) set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
	 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
	 * 5) Clear 4 pairs of PERFEVTSELx and PMCx;
	 */

	/*
	 * The real steps we choose are a little different from above.
	 * A) To reduce MSR operations, we don't run step 1) as they
	 *    are already cleared before this function is called;
	 * B) Call x86_perf_event_update to save PMCx before configuring
	 *    PERFEVTSELx with magic number;
	 * C) With step 5), we do clear only when the PERFEVTSELx is
	 *    not used currently.
	 * D) Call x86_perf_event_set_period to restore PMCx;
	 */

	/* We always operate 4 pairs of PERF Counters */
	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];
		if (event)
			x86_perf_event_update(event);
	}

	for (i = 0; i < 4; i++) {
		wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
		wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
	}

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);

	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];

		if (event) {
			x86_perf_event_set_period(event);
			__x86_pmu_enable_event(&event->hw,
					ARCH_PERFMON_EVENTSEL_ENABLE);
		} else
			wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
	}
}

static void intel_pmu_nhm_enable_all(int added)
{
	if (added)
		intel_pmu_nhm_workaround();
	intel_pmu_enable_all(added);
}

static inline u64 intel_pmu_get_status(void)
{
	u64 status;

	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

	return status;
}
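
/*
 * Writing a set bit to GLOBAL_OVF_CTRL clears the corresponding
 * overflow bit in GLOBAL_STATUS, acknowledging that source.
 */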
static inline void intel_pmu_ack_status(u64 ack)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}
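
/*
 * Each fixed counter owns a 4-bit control field in
 * MSR_ARCH_PERFMON_FIXED_CTR_CTRL; disabling a counter means clearing
 * its nibble. See intel_pmu_enable_fixed() below for the bit layout.
 */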
static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
{
	int idx = hwc->idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, mask;

	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	wrmsrl(hwc->config_base, ctrl_val);
}

static void intel_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
		intel_pmu_disable_bts();
		intel_pmu_drain_bts_buffer();
		return;
	}

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_disable_fixed(hwc);
		return;
	}

	x86_pmu_disable_event(event);

	if (unlikely(event->attr.precise_ip))
		intel_pmu_pebs_disable(event);
}

static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
{
	int idx = hwc->idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, bits, mask;

	/*
	 * Enable IRQ generation (0x8),
	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
	 * if requested:
	 */
	bits = 0x8ULL;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
		bits |= 0x2;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
		bits |= 0x1;

	/*
	 * ANY bit is supported in v3 and up
	 */
	if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
		bits |= 0x4;

	bits <<= (idx * 4);
	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	ctrl_val |= bits;
	wrmsrl(hwc->config_base, ctrl_val);
}

static void intel_pmu_enable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
		if (!__get_cpu_var(cpu_hw_events).enabled)
			return;

		intel_pmu_enable_bts(hwc->config);
		return;
	}

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_enable_fixed(hwc);
		return;
	}

	if (unlikely(event->attr.precise_ip))
		intel_pmu_pebs_enable(event);

	__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
}

/*
 * Save and restart an expired event. Called by NMI contexts,
 * so it has to be careful about preempting normal event ops:
 */
static int intel_pmu_save_and_restart(struct perf_event *event)
{
	x86_perf_event_update(event);
	return x86_perf_event_set_period(event);
}
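
/*
 * Last-resort cleanup: zero every counter and rewind the BTS buffer.
 * Used by the overflow handler below when it appears to be stuck.
 */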
static void intel_pmu_reset(void)
{
	struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds;
	unsigned long flags;
	int idx;

	if (!x86_pmu.num_counters)
		return;

	local_irq_save(flags);

	printk("clearing PMU state on CPU#%d\n", smp_processor_id());

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
		checking_wrmsrl(x86_pmu.perfctr + idx, 0ull);
	}
	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
		checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);

	if (ds)
		ds->bts_index = ds->bts_buffer_base;

	local_irq_restore(flags);
}

/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static int intel_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	int bit, loops;
	u64 status;
	int handled = 0;

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);

	intel_pmu_disable_all();
	intel_pmu_drain_bts_buffer();
	status = intel_pmu_get_status();
	if (!status) {
		intel_pmu_enable_all(0);
		return 0;
	}

	loops = 0;
again:
	intel_pmu_ack_status(status);
	if (++loops > 100) {
		WARN_ONCE(1, "perfevents: irq loop stuck!\n");
		perf_event_print_debug();
		intel_pmu_reset();
		goto done;
	}

	inc_irq_stat(apic_perf_irqs);

	intel_pmu_lbr_read();

	/*
	 * PEBS overflow sets bit 62 in the global status register
	 */
	if (__test_and_clear_bit(62, (unsigned long *)&status)) {
		handled++;
		x86_pmu.drain_pebs(regs);
	}

	for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
		struct perf_event *event = cpuc->events[bit];

		handled++;

		if (!test_bit(bit, cpuc->active_mask))
			continue;

		if (!intel_pmu_save_and_restart(event))
			continue;

		data.period = event->hw.last_period;

		if (perf_event_overflow(event, 1, &data, regs))
			x86_pmu_stop(event);
	}

	/*
	 * Repeat if there is more work to be done:
	 */
	status = intel_pmu_get_status();
	if (status)
		goto again;

done:
	intel_pmu_enable_all(0);
	return handled;
}
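
/*
 * A branch-instructions event with a sample period of 1 asks for every
 * single branch to be recorded, which only BTS can deliver; steer such
 * events onto the BTS pseudo-counter via its dedicated constraint.
 */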
static struct event_constraint *
intel_bts_constraints(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	unsigned int hw_event, bts_event;

	hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
	bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);

	if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
		return &bts_constraint;

	return NULL;
}

static struct event_constraint *
intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct event_constraint *c;

	c = intel_bts_constraints(event);
	if (c)
		return c;

	c = intel_pebs_constraints(event);
	if (c)
		return c;

	return x86_get_event_constraints(cpuc, event);
}
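
/*
 * The ANY-thread bit makes a counter observe both SMT siblings, so an
 * unprivileged user setting it could watch another thread's activity;
 * hence the capability check below.
 */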
static int intel_pmu_hw_config(struct perf_event *event)
{
	int ret = x86_pmu_hw_config(event);

	if (ret)
		return ret;

	if (event->attr.type != PERF_TYPE_RAW)
		return 0;

	if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
		return 0;

	if (x86_pmu.version < 3)
		return -EINVAL;

	if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
		return -EACCES;

	event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;

	return 0;
}

static __initconst const struct x86_pmu core_pmu = {
	.name			= "core",
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= x86_pmu_disable_all,
	.enable_all		= x86_pmu_enable_all,
	.enable			= x86_pmu_enable_event,
	.disable		= x86_pmu_disable_event,
	.hw_config		= x86_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
	.apic			= 1,
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic event period:
	 */
	.max_period		= (1ULL << 31) - 1,
	.get_event_constraints	= intel_get_event_constraints,
	.event_constraints	= intel_core_event_constraints,
};

static void intel_pmu_cpu_starting(int cpu)
{
	init_debug_store_on_cpu(cpu);
	/*
	 * Deal with CPUs that don't clear their LBRs on power-up.
	 */
	intel_pmu_lbr_reset();
}

static void intel_pmu_cpu_dying(int cpu)
{
	fini_debug_store_on_cpu(cpu);
}

static __initconst const struct x86_pmu intel_pmu = {
	.name			= "Intel",
	.handle_irq		= intel_pmu_handle_irq,
	.disable_all		= intel_pmu_disable_all,
	.enable_all		= intel_pmu_enable_all,
	.enable			= intel_pmu_enable_event,
	.disable		= intel_pmu_disable_event,
	.hw_config		= intel_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
	.apic			= 1,
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic event period:
	 */
	.max_period		= (1ULL << 31) - 1,
	.get_event_constraints	= intel_get_event_constraints,

	.cpu_starting		= intel_pmu_cpu_starting,
	.cpu_dying		= intel_pmu_cpu_dying,
};

static void intel_clovertown_quirks(void)
{
	/*
	 * PEBS is unreliable due to:
	 *
	 *   AJ67  - PEBS may experience CPL leaks
	 *   AJ68  - PEBS PMI may be delayed by one event
	 *   AJ69  - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
	 *   AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
	 *
	 * AJ67 could be worked around by restricting the OS/USR flags.
	 * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
	 *
	 * AJ106 could possibly be worked around by not allowing LBR
	 * usage from PEBS, including the fixup.
	 * AJ68 could possibly be worked around by always programming
	 * a pebs_event_reset[0] value and coping with the lost events.
	 *
	 * But taken together it might just make sense to not enable PEBS on
	 * these chips.
	 */
	printk(KERN_WARNING "PEBS disabled due to CPU errata.\n");
	x86_pmu.pebs = 0;
	x86_pmu.pebs_constraints = NULL;
}

static __init int intel_pmu_init(void)
{
	union cpuid10_edx edx;
	union cpuid10_eax eax;
	unsigned int unused;
	unsigned int ebx;
	int version;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
		switch (boot_cpu_data.x86) {
		case 0x6:
			return p6_pmu_init();
		case 0xf:
			return p4_pmu_init();
		}
		return -ENODEV;
	}

	/*
	 * Check whether the Architectural PerfMon supports
	 * Branch Misses Retired hw_event or not.
	 */
	cpuid(10, &eax.full, &ebx, &unused, &edx.full);
	if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
		return -ENODEV;

	version = eax.split.version_id;
	if (version < 2)
		x86_pmu = core_pmu;
	else
		x86_pmu = intel_pmu;

	x86_pmu.version		= version;
	x86_pmu.num_counters	= eax.split.num_counters;
	x86_pmu.cntval_bits	= eax.split.bit_width;
	x86_pmu.cntval_mask	= (1ULL << eax.split.bit_width) - 1;

	/*
	 * Quirk: v2 perfmon does not report fixed-purpose events, so
	 * assume at least 3 events:
	 */
	if (version > 1)
		x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);

	/*
	 * v2 and above have a perf capabilities MSR
	 */
	if (version > 1) {
		u64 capabilities;

		rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
		x86_pmu.intel_cap.capabilities = capabilities;
	}

	intel_ds_init();

	/*
	 * Install the hw-cache-events table:
	 */
	switch (boot_cpu_data.x86_model) {
	case 14: /* 65 nm core solo/duo, "Yonah" */
		pr_cont("Core events, ");
		break;

	case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
		x86_pmu.quirks = intel_clovertown_quirks;
		/* fall through */
	case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
	case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
	case 29: /* six-core 45 nm xeon "Dunnington" */
		memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_core();

		x86_pmu.event_constraints = intel_core2_event_constraints;
		pr_cont("Core2 events, ");
		break;

	case 26: /* 45 nm nehalem, "Bloomfield" */
	case 30: /* 45 nm nehalem, "Lynnfield" */
	case 46: /* 45 nm nehalem-ex, "Beckton" */
		memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_nehalem_event_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		pr_cont("Nehalem events, ");
		break;

	case 28: /* Atom */
		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_atom();

		x86_pmu.event_constraints = intel_gen_event_constraints;
		pr_cont("Atom events, ");
		break;

	case 37: /* 32 nm nehalem, "Clarkdale" */
	case 44: /* 32 nm nehalem, "Gulftown" */
		memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_westmere_event_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		pr_cont("Westmere events, ");
		break;

	default:
		/*
		 * default constraints for v2 and up
		 */
		x86_pmu.event_constraints = intel_gen_event_constraints;
		pr_cont("generic architected perfmon, ");
	}
	return 0;
}

#else /* CONFIG_CPU_SUP_INTEL */

static int intel_pmu_init(void)
{
	return 0;
}

#endif /* CONFIG_CPU_SUP_INTEL */