/* perf_event_intel.c */

#ifdef CONFIG_CPU_SUP_INTEL

/*
 * Intel PerfMon, used on Core and later.
 */
static const u64 intel_perfmon_event_map[] =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x412e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
	[PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
};
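
/*
 * Each map entry is a raw PERFEVTSEL encoding: the low byte is the
 * event select, the next byte the unit mask. E.g. 0x412e is event
 * 0x2e with unit mask 0x41, the architectural "LLC Misses" encoding,
 * and 0x4f2e (unit mask 0x4f) is "LLC References".
 */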

static struct event_constraint intel_core_event_constraints[] =
{
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
	EVENT_CONSTRAINT_END
};
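
/*
 * The second argument of INTEL_EVENT_CONSTRAINT() is a bitmask of the
 * general-purpose counters the event may be scheduled on: 0x1 means
 * counter 0 only, 0x2 counter 1 only, 0x3 either of the two.
 * FIXED_EVENT_CONSTRAINT() instead pins an event onto the given
 * fixed-purpose counter.
 */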

static struct event_constraint intel_core2_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	/*
	 * Core2 has Fixed Counter 2 listed as CPU_CLK_UNHALTED.REF and event
	 * 0x013c as CPU_CLK_UNHALTED.BUS and specifies there is a fixed
	 * ratio between these counters.
	 */
	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_nehalem_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
	INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
	INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
	INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
	INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_westmere_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_gen_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
	EVENT_CONSTRAINT_END
};

static u64 intel_pmu_event_map(int hw_event)
{
	return intel_perfmon_event_map[hw_event];
}
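
/*
 * hw-cache-event tables: a value of 0 marks a combination the CPU
 * cannot count (the generic x86 code turns this into -ENOENT), while
 * -1 marks an operation that makes no sense for that cache at all,
 * e.g. writes to L1I, and yields -EINVAL.
 */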

static __initconst u64 westmere_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
		[ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
		[ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
		[ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
};

static __initconst u64 nehalem_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
		[ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
		[ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
		[ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
};

static __initconst u64 core2_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
		[ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2a, /* L2_ST.MESI */
		[ C(RESULT_MISS) ] = 0x412a, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0208, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
};

static __initconst u64 atom_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
		[ C(RESULT_MISS) ] = 0,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
		[ C(RESULT_MISS) ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2a, /* L2_ST.MESI */
		[ C(RESULT_MISS) ] = 0x412a, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
};
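
/*
 * Sanitize a user-supplied raw config (e.g. a perf tool raw event such
 * as r412e): only the event-select, unit-mask, edge, invert and
 * counter-mask fields pass through to the hardware; everything else is
 * masked off. The individual CORE_EVNTSEL_*_MASK defines document the
 * field layout; only the combined CORE_EVNTSEL_MASK is used.
 */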
static u64 intel_pmu_raw_event(u64 hw_event)
{
#define CORE_EVNTSEL_EVENT_MASK		0x000000FFULL
#define CORE_EVNTSEL_UNIT_MASK		0x0000FF00ULL
#define CORE_EVNTSEL_EDGE_MASK		0x00040000ULL
#define CORE_EVNTSEL_INV_MASK		0x00800000ULL
#define CORE_EVNTSEL_REG_MASK		0xFF000000ULL

#define CORE_EVNTSEL_MASK		\
	(INTEL_ARCH_EVTSEL_MASK |	\
	 INTEL_ARCH_UNIT_MASK   |	\
	 INTEL_ARCH_EDGE_MASK   |	\
	 INTEL_ARCH_INV_MASK    |	\
	 INTEL_ARCH_CNT_MASK)

	return hw_event & CORE_EVNTSEL_MASK;
}
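
/*
 * Branch Trace Store: with TR+BTS+BTINT set in the DEBUGCTL MSR, the
 * CPU logs a from/to record for every taken branch into the DS buffer
 * and raises an interrupt when the buffer fills up. The BTS_OFF_OS and
 * BTS_OFF_USR bits suppress logging in ring 0 and ring 3 respectively,
 * mirroring the event's OS/USR configuration.
 */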
static void intel_pmu_enable_bts(u64 config)
{
	unsigned long debugctlmsr;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr |= X86_DEBUGCTL_TR;
	debugctlmsr |= X86_DEBUGCTL_BTS;
	debugctlmsr |= X86_DEBUGCTL_BTINT;

	if (!(config & ARCH_PERFMON_EVENTSEL_OS))
		debugctlmsr |= X86_DEBUGCTL_BTS_OFF_OS;

	if (!(config & ARCH_PERFMON_EVENTSEL_USR))
		debugctlmsr |= X86_DEBUGCTL_BTS_OFF_USR;

	update_debugctlmsr(debugctlmsr);
}

static void intel_pmu_disable_bts(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long debugctlmsr;

	if (!cpuc->ds)
		return;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr &=
		~(X86_DEBUGCTL_TR | X86_DEBUGCTL_BTS | X86_DEBUGCTL_BTINT |
		  X86_DEBUGCTL_BTS_OFF_OS | X86_DEBUGCTL_BTS_OFF_USR);

	update_debugctlmsr(debugctlmsr);
}

static void intel_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
		intel_pmu_disable_bts();
}

static void intel_pmu_enable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);

	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
		struct perf_event *event =
			cpuc->events[X86_PMC_IDX_FIXED_BTS];

		if (WARN_ON_ONCE(!event))
			return;

		intel_pmu_enable_bts(event->hw.config);
	}
}

static inline u64 intel_pmu_get_status(void)
{
	u64 status;

	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

	return status;
}

static inline void intel_pmu_ack_status(u64 ack)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}
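
/*
 * Fixed counters are driven through MSR_ARCH_PERFMON_FIXED_CTR_CTRL,
 * four control bits per counter: bit 0 counts ring 0, bit 1 ring 3,
 * bit 2 is the v3 ANY-thread bit and bit 3 enables PMI on overflow.
 * E.g. fixed counter 1 owns bits 7:4, and user+kernel counting with
 * interrupts is 0xb in that nibble.
 */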
static inline void
intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx)
{
	int idx = __idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, mask;

	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	(void)checking_wrmsrl(hwc->config_base, ctrl_val);
}
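
/*
 * Turn the raw BTS records (one {from, to, flags} triple per taken
 * branch) into perf samples: the branch source goes out as the sample
 * IP and the branch target as the sample address.
 */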
static void intel_pmu_drain_bts_buffer(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct bts_record {
		u64	from;
		u64	to;
		u64	flags;
	};
	struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
	struct bts_record *at, *top;
	struct perf_output_handle handle;
	struct perf_event_header header;
	struct perf_sample_data data;
	struct pt_regs regs;

	if (!event)
		return;

	if (!ds)
		return;

	at  = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
	top = (struct bts_record *)(unsigned long)ds->bts_index;

	if (top <= at)
		return;

	ds->bts_index = ds->bts_buffer_base;

	perf_sample_data_init(&data, 0);

	data.period	= event->hw.last_period;
	regs.ip		= 0;

	/*
	 * Prepare a generic sample, i.e. fill in the invariant fields.
	 * We will overwrite the from and to address before we output
	 * the sample.
	 */
	perf_prepare_sample(&header, &data, event, &regs);

	if (perf_output_begin(&handle, event,
			      header.size * (top - at), 1, 1))
		return;

	for (; at < top; at++) {
		data.ip		= at->from;
		data.addr	= at->to;

		perf_output_sample(&handle, &header, &data, event);
	}

	perf_output_end(&handle);

	/* There's new data available. */
	event->hw.interrupts++;
	event->pending_kill = POLL_IN;
}

static inline void
intel_pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
	if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
		intel_pmu_disable_bts();
		intel_pmu_drain_bts_buffer();
		return;
	}

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_disable_fixed(hwc, idx);
		return;
	}

	x86_pmu_disable_event(hwc, idx);
}

static inline void
intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
{
	int idx = __idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, bits, mask;
	int err;

	/*
	 * Enable IRQ generation (0x8),
	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
	 * if requested:
	 */
	bits = 0x8ULL;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
		bits |= 0x2;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
		bits |= 0x1;

	/*
	 * ANY bit is supported in v3 and up
	 */
	if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
		bits |= 0x4;

	bits <<= (idx * 4);
	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	ctrl_val |= bits;
	err = checking_wrmsrl(hwc->config_base, ctrl_val);
}

static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
	if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
		if (!__get_cpu_var(cpu_hw_events).enabled)
			return;

		intel_pmu_enable_bts(hwc->config);
		return;
	}

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_enable_fixed(hwc, idx);
		return;
	}

	__x86_pmu_enable_event(hwc, idx);
}

/*
 * Save and restart an expired event. Called by NMI contexts,
 * so it has to be careful about preempting normal event ops:
 */
static int intel_pmu_save_and_restart(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	int ret;

	x86_perf_event_update(event, hwc, idx);
	ret = x86_perf_event_set_period(event, hwc, idx);

	return ret;
}
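
/*
 * Last-resort recovery: zero every counter and control register so the
 * PMU stops generating interrupts. Used when the IRQ handler loops
 * without making progress (see the "irq loop stuck" path below).
 */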
static void intel_pmu_reset(void)
{
	struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds;
	unsigned long flags;
	int idx;

	if (!x86_pmu.num_events)
		return;

	local_irq_save(flags);

	printk("clearing PMU state on CPU#%d\n", smp_processor_id());

	for (idx = 0; idx < x86_pmu.num_events; idx++) {
		checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
		checking_wrmsrl(x86_pmu.perfctr  + idx, 0ull);
	}
	for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
		checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
	}
	if (ds)
		ds->bts_index = ds->bts_buffer_base;

	local_irq_restore(flags);
}

/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static int intel_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	int bit, loops;
	u64 ack, status;

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);

	perf_disable();
	intel_pmu_drain_bts_buffer();
	status = intel_pmu_get_status();
	if (!status) {
		perf_enable();
		return 0;
	}

	loops = 0;
again:
	if (++loops > 100) {
		WARN_ONCE(1, "perfevents: irq loop stuck!\n");
		perf_event_print_debug();
		intel_pmu_reset();
		perf_enable();
		return 1;
	}

	inc_irq_stat(apic_perf_irqs);
	ack = status;
	for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
		struct perf_event *event = cpuc->events[bit];

		clear_bit(bit, (unsigned long *) &status);
		if (!test_bit(bit, cpuc->active_mask))
			continue;

		if (!intel_pmu_save_and_restart(event))
			continue;

		data.period = event->hw.last_period;

		if (perf_event_overflow(event, 1, &data, regs))
			intel_pmu_disable_event(&event->hw, bit);
	}

	intel_pmu_ack_status(ack);

	/*
	 * Repeat if there is more work to be done:
	 */
	status = intel_pmu_get_status();
	if (status)
		goto again;

	perf_enable();

	return 1;
}
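
/*
 * BTS is exposed as a pseudo counter: a branch-instructions event with
 * sample_period == 1 (i.e. trace every branch) is constrained onto the
 * X86_PMC_IDX_FIXED_BTS slot instead of a real counter.
 */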
static struct event_constraint bts_constraint =
	EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);

static struct event_constraint *
intel_special_constraints(struct perf_event *event)
{
	unsigned int hw_event;

	hw_event = event->hw.config & INTEL_ARCH_EVENT_MASK;

	if (unlikely((hw_event ==
		      x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) &&
		     (event->hw.sample_period == 1))) {

		return &bts_constraint;
	}

	return NULL;
}

static struct event_constraint *
intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct event_constraint *c;

	c = intel_special_constraints(event);
	if (c)
		return c;

	return x86_get_event_constraints(cpuc, event);
}
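
/*
 * Two PMU descriptions: core_pmu drives v1 "Core" perfmon through the
 * generic per-counter enable/disable paths, while intel_pmu (v2 and
 * up) uses the global control/status MSRs and adds BTS support.
 */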
static __initconst struct x86_pmu core_pmu = {
	.name			= "core",
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= x86_pmu_disable_all,
	.enable_all		= x86_pmu_enable_all,
	.enable			= x86_pmu_enable_event,
	.disable		= x86_pmu_disable_event,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.raw_event		= intel_pmu_raw_event,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
	.apic			= 1,
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic event period:
	 */
	.max_period		= (1ULL << 31) - 1,
	.get_event_constraints	= intel_get_event_constraints,
	.event_constraints	= intel_core_event_constraints,
};

static __initconst struct x86_pmu intel_pmu = {
	.name			= "Intel",
	.handle_irq		= intel_pmu_handle_irq,
	.disable_all		= intel_pmu_disable_all,
	.enable_all		= intel_pmu_enable_all,
	.enable			= intel_pmu_enable_event,
	.disable		= intel_pmu_disable_event,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.raw_event		= intel_pmu_raw_event,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
	.apic			= 1,
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic event period:
	 */
	.max_period		= (1ULL << 31) - 1,
	.enable_bts		= intel_pmu_enable_bts,
	.disable_bts		= intel_pmu_disable_bts,
	.get_event_constraints	= intel_get_event_constraints
};
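
/*
 * CPUID leaf 0xa describes the architectural PMU: eax packs the
 * version (bits 7:0), the number of general-purpose counters (15:8),
 * their bit width (23:16) and the event-mask length (31:24); edx
 * reports the fixed-purpose counters on v2 and up.
 */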
static __init int intel_pmu_init(void)
{
	union cpuid10_edx edx;
	union cpuid10_eax eax;
	unsigned int unused;
	unsigned int ebx;
	int version;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
		/* check for P6 processor family */
		if (boot_cpu_data.x86 == 6) {
			return p6_pmu_init();
		} else {
			return -ENODEV;
		}
	}

	/*
	 * Check whether the Architectural PerfMon supports
	 * Branch Misses Retired hw_event or not.
	 */
	cpuid(10, &eax.full, &ebx, &unused, &edx.full);
	if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
		return -ENODEV;

	version = eax.split.version_id;
	if (version < 2)
		x86_pmu = core_pmu;
	else
		x86_pmu = intel_pmu;

	x86_pmu.version			= version;
	x86_pmu.num_events		= eax.split.num_events;
	x86_pmu.event_bits		= eax.split.bit_width;
	x86_pmu.event_mask		= (1ULL << eax.split.bit_width) - 1;

	/*
	 * Quirk: v2 perfmon does not report fixed-purpose events, so
	 * assume at least 3 events:
	 */
	if (version > 1)
		x86_pmu.num_events_fixed = max((int)edx.split.num_events_fixed, 3);

	/*
	 * Install the hw-cache-events table:
	 */
	switch (boot_cpu_data.x86_model) {
	case 14: /* 65 nm core solo/duo, "Yonah" */
		pr_cont("Core events, ");
		break;

	case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
	case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
	case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
	case 29: /* six-core 45 nm xeon "Dunnington" */
		memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		x86_pmu.event_constraints = intel_core2_event_constraints;
		pr_cont("Core2 events, ");
		break;

	case 26: /* 45 nm nehalem, "Bloomfield" */
	case 30: /* 45 nm nehalem, "Lynnfield" */
		memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		x86_pmu.event_constraints = intel_nehalem_event_constraints;
		pr_cont("Nehalem/Corei7 events, ");
		break;

	case 28: /* Atom */
		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		x86_pmu.event_constraints = intel_gen_event_constraints;
		pr_cont("Atom events, ");
		break;

	case 37: /* 32 nm nehalem, "Clarkdale" */
	case 44: /* 32 nm nehalem, "Gulftown" */
		memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		x86_pmu.event_constraints = intel_westmere_event_constraints;
		pr_cont("Westmere events, ");
		break;

	default:
		/*
		 * default constraints for v2 and up
		 */
		x86_pmu.event_constraints = intel_gen_event_constraints;
		pr_cont("generic architected perfmon, ");
	}
	return 0;
}

#else /* CONFIG_CPU_SUP_INTEL */

static int intel_pmu_init(void)
{
	return 0;
}

#endif /* CONFIG_CPU_SUP_INTEL */