perf_event_intel.c

  1. /*
  2. * Per core/cpu state
  3. *
  4. * Used to coordinate shared registers between HT threads or
  5. * among events on a single PMU.
  6. */
  7. #include <linux/stddef.h>
  8. #include <linux/types.h>
  9. #include <linux/init.h>
  10. #include <linux/slab.h>
  11. #include <linux/export.h>
  12. #include <asm/hardirq.h>
  13. #include <asm/apic.h>
  14. #include "perf_event.h"
  15. /*
  16. * Intel PerfMon, used on Core and later.
  17. */
  18. static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
  19. {
  20. [PERF_COUNT_HW_CPU_CYCLES] = 0x003c,
  21. [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
  22. [PERF_COUNT_HW_CACHE_REFERENCES] = 0x4f2e,
  23. [PERF_COUNT_HW_CACHE_MISSES] = 0x412e,
  24. [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
  25. [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
  26. [PERF_COUNT_HW_BUS_CYCLES] = 0x013c,
  27. [PERF_COUNT_HW_REF_CPU_CYCLES] = 0x0300, /* pseudo-encoding */
  28. };
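/*
 * Constraint tables below: the second argument of INTEL_EVENT_CONSTRAINT()
 * is the bitmask of generic counters the event may be scheduled on, while
 * FIXED_EVENT_CONSTRAINT() pins an event encoding to the given fixed counter.
 */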
  29. static struct event_constraint intel_core_event_constraints[] __read_mostly =
  30. {
  31. INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
  32. INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
  33. INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
  34. INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
  35. INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
  36. INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
  37. EVENT_CONSTRAINT_END
  38. };
  39. static struct event_constraint intel_core2_event_constraints[] __read_mostly =
  40. {
  41. FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
  42. FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
  43. FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
  44. INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
  45. INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
  46. INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
  47. INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
  48. INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
  49. INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
  50. INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
  51. INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
  52. INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
  53. INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
  54. EVENT_CONSTRAINT_END
  55. };
  56. static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
  57. {
  58. FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
  59. FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
  60. FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
  61. INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
  62. INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
  63. INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
  64. INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
  65. INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
  66. INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
  67. INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
  68. INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
  69. EVENT_CONSTRAINT_END
  70. };
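/*
 * Extra (shared) registers: offcore response events need an additional
 * write to an MSR_OFFCORE_RSP_x register; the third argument of
 * INTEL_EVENT_EXTRA_REG() is the mask of bits valid in that MSR.
 */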
  71. static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
  72. {
  73. INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
  74. EVENT_EXTRA_END
  75. };
  76. static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
  77. {
  78. FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
  79. FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
  80. FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
  81. INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
  82. INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
  83. INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
  84. INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
  85. EVENT_CONSTRAINT_END
  86. };
  87. static struct event_constraint intel_snb_event_constraints[] __read_mostly =
  88. {
  89. FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
  90. FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
  91. FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
  92. INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
  93. INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
  94. INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
  95. EVENT_CONSTRAINT_END
  96. };
  97. static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
  98. {
  99. INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
  100. INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
  101. EVENT_EXTRA_END
  102. };
  103. static struct event_constraint intel_v1_event_constraints[] __read_mostly =
  104. {
  105. EVENT_CONSTRAINT_END
  106. };
  107. static struct event_constraint intel_gen_event_constraints[] __read_mostly =
  108. {
  109. FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
  110. FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
  111. FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
  112. EVENT_CONSTRAINT_END
  113. };
  114. static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
  115. INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
  116. INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
  117. EVENT_EXTRA_END
  118. };
  119. static u64 intel_pmu_event_map(int hw_event)
  120. {
  121. return intel_perfmon_event_map[hw_event];
  122. }
  123. static __initconst const u64 snb_hw_cache_event_ids
  124. [PERF_COUNT_HW_CACHE_MAX]
  125. [PERF_COUNT_HW_CACHE_OP_MAX]
  126. [PERF_COUNT_HW_CACHE_RESULT_MAX] =
  127. {
  128. [ C(L1D) ] = {
  129. [ C(OP_READ) ] = {
  130. [ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */
  131. [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPLACEMENT */
  132. },
  133. [ C(OP_WRITE) ] = {
  134. [ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */
  135. [ C(RESULT_MISS) ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */
  136. },
  137. [ C(OP_PREFETCH) ] = {
  138. [ C(RESULT_ACCESS) ] = 0x0,
  139. [ C(RESULT_MISS) ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */
  140. },
  141. },
  142. [ C(L1I ) ] = {
  143. [ C(OP_READ) ] = {
  144. [ C(RESULT_ACCESS) ] = 0x0,
  145. [ C(RESULT_MISS) ] = 0x0280, /* ICACHE.MISSES */
  146. },
  147. [ C(OP_WRITE) ] = {
  148. [ C(RESULT_ACCESS) ] = -1,
  149. [ C(RESULT_MISS) ] = -1,
  150. },
  151. [ C(OP_PREFETCH) ] = {
  152. [ C(RESULT_ACCESS) ] = 0x0,
  153. [ C(RESULT_MISS) ] = 0x0,
  154. },
  155. },
  156. [ C(LL ) ] = {
  157. [ C(OP_READ) ] = {
  158. /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
  159. [ C(RESULT_ACCESS) ] = 0x01b7,
  160. /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
  161. [ C(RESULT_MISS) ] = 0x01b7,
  162. },
  163. [ C(OP_WRITE) ] = {
  164. /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
  165. [ C(RESULT_ACCESS) ] = 0x01b7,
  166. /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
  167. [ C(RESULT_MISS) ] = 0x01b7,
  168. },
  169. [ C(OP_PREFETCH) ] = {
  170. /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
  171. [ C(RESULT_ACCESS) ] = 0x01b7,
  172. /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
  173. [ C(RESULT_MISS) ] = 0x01b7,
  174. },
  175. },
  176. [ C(DTLB) ] = {
  177. [ C(OP_READ) ] = {
  178. [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
  179. [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
  180. },
  181. [ C(OP_WRITE) ] = {
  182. [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
  183. [ C(RESULT_MISS) ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
  184. },
  185. [ C(OP_PREFETCH) ] = {
  186. [ C(RESULT_ACCESS) ] = 0x0,
  187. [ C(RESULT_MISS) ] = 0x0,
  188. },
  189. },
  190. [ C(ITLB) ] = {
  191. [ C(OP_READ) ] = {
  192. [ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */
  193. [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */
  194. },
  195. [ C(OP_WRITE) ] = {
  196. [ C(RESULT_ACCESS) ] = -1,
  197. [ C(RESULT_MISS) ] = -1,
  198. },
  199. [ C(OP_PREFETCH) ] = {
  200. [ C(RESULT_ACCESS) ] = -1,
  201. [ C(RESULT_MISS) ] = -1,
  202. },
  203. },
  204. [ C(BPU ) ] = {
  205. [ C(OP_READ) ] = {
  206. [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
  207. [ C(RESULT_MISS) ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
  208. },
  209. [ C(OP_WRITE) ] = {
  210. [ C(RESULT_ACCESS) ] = -1,
  211. [ C(RESULT_MISS) ] = -1,
  212. },
  213. [ C(OP_PREFETCH) ] = {
  214. [ C(RESULT_ACCESS) ] = -1,
  215. [ C(RESULT_MISS) ] = -1,
  216. },
  217. },
  218. [ C(NODE) ] = {
  219. [ C(OP_READ) ] = {
  220. [ C(RESULT_ACCESS) ] = -1,
  221. [ C(RESULT_MISS) ] = -1,
  222. },
  223. [ C(OP_WRITE) ] = {
  224. [ C(RESULT_ACCESS) ] = -1,
  225. [ C(RESULT_MISS) ] = -1,
  226. },
  227. [ C(OP_PREFETCH) ] = {
  228. [ C(RESULT_ACCESS) ] = -1,
  229. [ C(RESULT_MISS) ] = -1,
  230. },
  231. },
  232. };
  233. static __initconst const u64 westmere_hw_cache_event_ids
  234. [PERF_COUNT_HW_CACHE_MAX]
  235. [PERF_COUNT_HW_CACHE_OP_MAX]
  236. [PERF_COUNT_HW_CACHE_RESULT_MAX] =
  237. {
  238. [ C(L1D) ] = {
  239. [ C(OP_READ) ] = {
  240. [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
  241. [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
  242. },
  243. [ C(OP_WRITE) ] = {
244. [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
  245. [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
  246. },
  247. [ C(OP_PREFETCH) ] = {
  248. [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
  249. [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
  250. },
  251. },
  252. [ C(L1I ) ] = {
  253. [ C(OP_READ) ] = {
  254. [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
  255. [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
  256. },
  257. [ C(OP_WRITE) ] = {
  258. [ C(RESULT_ACCESS) ] = -1,
  259. [ C(RESULT_MISS) ] = -1,
  260. },
  261. [ C(OP_PREFETCH) ] = {
  262. [ C(RESULT_ACCESS) ] = 0x0,
  263. [ C(RESULT_MISS) ] = 0x0,
  264. },
  265. },
  266. [ C(LL ) ] = {
  267. [ C(OP_READ) ] = {
  268. /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
  269. [ C(RESULT_ACCESS) ] = 0x01b7,
  270. /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
  271. [ C(RESULT_MISS) ] = 0x01b7,
  272. },
  273. /*
  274. * Use RFO, not WRITEBACK, because a write miss would typically occur
  275. * on RFO.
  276. */
  277. [ C(OP_WRITE) ] = {
  278. /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
  279. [ C(RESULT_ACCESS) ] = 0x01b7,
  280. /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
  281. [ C(RESULT_MISS) ] = 0x01b7,
  282. },
  283. [ C(OP_PREFETCH) ] = {
  284. /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
  285. [ C(RESULT_ACCESS) ] = 0x01b7,
  286. /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
  287. [ C(RESULT_MISS) ] = 0x01b7,
  288. },
  289. },
  290. [ C(DTLB) ] = {
  291. [ C(OP_READ) ] = {
  292. [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
  293. [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
  294. },
  295. [ C(OP_WRITE) ] = {
296. [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
  297. [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
  298. },
  299. [ C(OP_PREFETCH) ] = {
  300. [ C(RESULT_ACCESS) ] = 0x0,
  301. [ C(RESULT_MISS) ] = 0x0,
  302. },
  303. },
  304. [ C(ITLB) ] = {
  305. [ C(OP_READ) ] = {
  306. [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
  307. [ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.ANY */
  308. },
  309. [ C(OP_WRITE) ] = {
  310. [ C(RESULT_ACCESS) ] = -1,
  311. [ C(RESULT_MISS) ] = -1,
  312. },
  313. [ C(OP_PREFETCH) ] = {
  314. [ C(RESULT_ACCESS) ] = -1,
  315. [ C(RESULT_MISS) ] = -1,
  316. },
  317. },
  318. [ C(BPU ) ] = {
  319. [ C(OP_READ) ] = {
  320. [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
  321. [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
  322. },
  323. [ C(OP_WRITE) ] = {
  324. [ C(RESULT_ACCESS) ] = -1,
  325. [ C(RESULT_MISS) ] = -1,
  326. },
  327. [ C(OP_PREFETCH) ] = {
  328. [ C(RESULT_ACCESS) ] = -1,
  329. [ C(RESULT_MISS) ] = -1,
  330. },
  331. },
  332. [ C(NODE) ] = {
  333. [ C(OP_READ) ] = {
  334. [ C(RESULT_ACCESS) ] = 0x01b7,
  335. [ C(RESULT_MISS) ] = 0x01b7,
  336. },
  337. [ C(OP_WRITE) ] = {
  338. [ C(RESULT_ACCESS) ] = 0x01b7,
  339. [ C(RESULT_MISS) ] = 0x01b7,
  340. },
  341. [ C(OP_PREFETCH) ] = {
  342. [ C(RESULT_ACCESS) ] = 0x01b7,
  343. [ C(RESULT_MISS) ] = 0x01b7,
  344. },
  345. },
  346. };
  347. /*
  348. * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
  349. * See IA32 SDM Vol 3B 30.6.1.3
  350. */
  351. #define NHM_DMND_DATA_RD (1 << 0)
  352. #define NHM_DMND_RFO (1 << 1)
  353. #define NHM_DMND_IFETCH (1 << 2)
  354. #define NHM_DMND_WB (1 << 3)
  355. #define NHM_PF_DATA_RD (1 << 4)
  356. #define NHM_PF_DATA_RFO (1 << 5)
  357. #define NHM_PF_IFETCH (1 << 6)
  358. #define NHM_OFFCORE_OTHER (1 << 7)
  359. #define NHM_UNCORE_HIT (1 << 8)
  360. #define NHM_OTHER_CORE_HIT_SNP (1 << 9)
  361. #define NHM_OTHER_CORE_HITM (1 << 10)
  362. /* reserved */
  363. #define NHM_REMOTE_CACHE_FWD (1 << 12)
  364. #define NHM_REMOTE_DRAM (1 << 13)
  365. #define NHM_LOCAL_DRAM (1 << 14)
  366. #define NHM_NON_DRAM (1 << 15)
  367. #define NHM_LOCAL (NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
  368. #define NHM_REMOTE (NHM_REMOTE_DRAM)
  369. #define NHM_DMND_READ (NHM_DMND_DATA_RD)
  370. #define NHM_DMND_WRITE (NHM_DMND_RFO|NHM_DMND_WB)
  371. #define NHM_DMND_PREFETCH (NHM_PF_DATA_RD|NHM_PF_DATA_RFO)
  372. #define NHM_L3_HIT (NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
  373. #define NHM_L3_MISS (NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
  374. #define NHM_L3_ACCESS (NHM_L3_HIT|NHM_L3_MISS)
  375. static __initconst const u64 nehalem_hw_cache_extra_regs
  376. [PERF_COUNT_HW_CACHE_MAX]
  377. [PERF_COUNT_HW_CACHE_OP_MAX]
  378. [PERF_COUNT_HW_CACHE_RESULT_MAX] =
  379. {
  380. [ C(LL ) ] = {
  381. [ C(OP_READ) ] = {
  382. [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
  383. [ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_L3_MISS,
  384. },
  385. [ C(OP_WRITE) ] = {
  386. [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
  387. [ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_L3_MISS,
  388. },
  389. [ C(OP_PREFETCH) ] = {
  390. [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
  391. [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
  392. },
  393. },
  394. [ C(NODE) ] = {
  395. [ C(OP_READ) ] = {
  396. [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
  397. [ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_REMOTE,
  398. },
  399. [ C(OP_WRITE) ] = {
  400. [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
  401. [ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_REMOTE,
  402. },
  403. [ C(OP_PREFETCH) ] = {
  404. [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
  405. [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_REMOTE,
  406. },
  407. },
  408. };
  409. static __initconst const u64 nehalem_hw_cache_event_ids
  410. [PERF_COUNT_HW_CACHE_MAX]
  411. [PERF_COUNT_HW_CACHE_OP_MAX]
  412. [PERF_COUNT_HW_CACHE_RESULT_MAX] =
  413. {
  414. [ C(L1D) ] = {
  415. [ C(OP_READ) ] = {
  416. [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
  417. [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
  418. },
  419. [ C(OP_WRITE) ] = {
420. [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
  421. [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
  422. },
  423. [ C(OP_PREFETCH) ] = {
  424. [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
  425. [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
  426. },
  427. },
  428. [ C(L1I ) ] = {
  429. [ C(OP_READ) ] = {
  430. [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
  431. [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
  432. },
  433. [ C(OP_WRITE) ] = {
  434. [ C(RESULT_ACCESS) ] = -1,
  435. [ C(RESULT_MISS) ] = -1,
  436. },
  437. [ C(OP_PREFETCH) ] = {
  438. [ C(RESULT_ACCESS) ] = 0x0,
  439. [ C(RESULT_MISS) ] = 0x0,
  440. },
  441. },
  442. [ C(LL ) ] = {
  443. [ C(OP_READ) ] = {
  444. /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
  445. [ C(RESULT_ACCESS) ] = 0x01b7,
  446. /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
  447. [ C(RESULT_MISS) ] = 0x01b7,
  448. },
  449. /*
  450. * Use RFO, not WRITEBACK, because a write miss would typically occur
  451. * on RFO.
  452. */
  453. [ C(OP_WRITE) ] = {
  454. /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
  455. [ C(RESULT_ACCESS) ] = 0x01b7,
  456. /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
  457. [ C(RESULT_MISS) ] = 0x01b7,
  458. },
  459. [ C(OP_PREFETCH) ] = {
  460. /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
  461. [ C(RESULT_ACCESS) ] = 0x01b7,
  462. /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
  463. [ C(RESULT_MISS) ] = 0x01b7,
  464. },
  465. },
  466. [ C(DTLB) ] = {
  467. [ C(OP_READ) ] = {
  468. [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
  469. [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
  470. },
  471. [ C(OP_WRITE) ] = {
  472. [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
  473. [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
  474. },
  475. [ C(OP_PREFETCH) ] = {
  476. [ C(RESULT_ACCESS) ] = 0x0,
  477. [ C(RESULT_MISS) ] = 0x0,
  478. },
  479. },
  480. [ C(ITLB) ] = {
  481. [ C(OP_READ) ] = {
  482. [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
  483. [ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */
  484. },
  485. [ C(OP_WRITE) ] = {
  486. [ C(RESULT_ACCESS) ] = -1,
  487. [ C(RESULT_MISS) ] = -1,
  488. },
  489. [ C(OP_PREFETCH) ] = {
  490. [ C(RESULT_ACCESS) ] = -1,
  491. [ C(RESULT_MISS) ] = -1,
  492. },
  493. },
  494. [ C(BPU ) ] = {
  495. [ C(OP_READ) ] = {
  496. [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
  497. [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
  498. },
  499. [ C(OP_WRITE) ] = {
  500. [ C(RESULT_ACCESS) ] = -1,
  501. [ C(RESULT_MISS) ] = -1,
  502. },
  503. [ C(OP_PREFETCH) ] = {
  504. [ C(RESULT_ACCESS) ] = -1,
  505. [ C(RESULT_MISS) ] = -1,
  506. },
  507. },
  508. [ C(NODE) ] = {
  509. [ C(OP_READ) ] = {
  510. [ C(RESULT_ACCESS) ] = 0x01b7,
  511. [ C(RESULT_MISS) ] = 0x01b7,
  512. },
  513. [ C(OP_WRITE) ] = {
  514. [ C(RESULT_ACCESS) ] = 0x01b7,
  515. [ C(RESULT_MISS) ] = 0x01b7,
  516. },
  517. [ C(OP_PREFETCH) ] = {
  518. [ C(RESULT_ACCESS) ] = 0x01b7,
  519. [ C(RESULT_MISS) ] = 0x01b7,
  520. },
  521. },
  522. };
  523. static __initconst const u64 core2_hw_cache_event_ids
  524. [PERF_COUNT_HW_CACHE_MAX]
  525. [PERF_COUNT_HW_CACHE_OP_MAX]
  526. [PERF_COUNT_HW_CACHE_RESULT_MAX] =
  527. {
  528. [ C(L1D) ] = {
  529. [ C(OP_READ) ] = {
  530. [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
  531. [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
  532. },
  533. [ C(OP_WRITE) ] = {
  534. [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
  535. [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
  536. },
  537. [ C(OP_PREFETCH) ] = {
  538. [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
  539. [ C(RESULT_MISS) ] = 0,
  540. },
  541. },
  542. [ C(L1I ) ] = {
  543. [ C(OP_READ) ] = {
  544. [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
  545. [ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */
  546. },
  547. [ C(OP_WRITE) ] = {
  548. [ C(RESULT_ACCESS) ] = -1,
  549. [ C(RESULT_MISS) ] = -1,
  550. },
  551. [ C(OP_PREFETCH) ] = {
  552. [ C(RESULT_ACCESS) ] = 0,
  553. [ C(RESULT_MISS) ] = 0,
  554. },
  555. },
  556. [ C(LL ) ] = {
  557. [ C(OP_READ) ] = {
  558. [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
  559. [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
  560. },
  561. [ C(OP_WRITE) ] = {
  562. [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
  563. [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
  564. },
  565. [ C(OP_PREFETCH) ] = {
  566. [ C(RESULT_ACCESS) ] = 0,
  567. [ C(RESULT_MISS) ] = 0,
  568. },
  569. },
  570. [ C(DTLB) ] = {
  571. [ C(OP_READ) ] = {
  572. [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
  573. [ C(RESULT_MISS) ] = 0x0208, /* DTLB_MISSES.MISS_LD */
  574. },
  575. [ C(OP_WRITE) ] = {
  576. [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
  577. [ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */
  578. },
  579. [ C(OP_PREFETCH) ] = {
  580. [ C(RESULT_ACCESS) ] = 0,
  581. [ C(RESULT_MISS) ] = 0,
  582. },
  583. },
  584. [ C(ITLB) ] = {
  585. [ C(OP_READ) ] = {
  586. [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
  587. [ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */
  588. },
  589. [ C(OP_WRITE) ] = {
  590. [ C(RESULT_ACCESS) ] = -1,
  591. [ C(RESULT_MISS) ] = -1,
  592. },
  593. [ C(OP_PREFETCH) ] = {
  594. [ C(RESULT_ACCESS) ] = -1,
  595. [ C(RESULT_MISS) ] = -1,
  596. },
  597. },
  598. [ C(BPU ) ] = {
  599. [ C(OP_READ) ] = {
  600. [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
601. [ C(RESULT_MISS) ] = 0x00c5, /* BR_INST_RETIRED.MISPRED */
  602. },
  603. [ C(OP_WRITE) ] = {
  604. [ C(RESULT_ACCESS) ] = -1,
  605. [ C(RESULT_MISS) ] = -1,
  606. },
  607. [ C(OP_PREFETCH) ] = {
  608. [ C(RESULT_ACCESS) ] = -1,
  609. [ C(RESULT_MISS) ] = -1,
  610. },
  611. },
  612. };
  613. static __initconst const u64 atom_hw_cache_event_ids
  614. [PERF_COUNT_HW_CACHE_MAX]
  615. [PERF_COUNT_HW_CACHE_OP_MAX]
  616. [PERF_COUNT_HW_CACHE_RESULT_MAX] =
  617. {
  618. [ C(L1D) ] = {
  619. [ C(OP_READ) ] = {
  620. [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
  621. [ C(RESULT_MISS) ] = 0,
  622. },
  623. [ C(OP_WRITE) ] = {
  624. [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
  625. [ C(RESULT_MISS) ] = 0,
  626. },
  627. [ C(OP_PREFETCH) ] = {
  628. [ C(RESULT_ACCESS) ] = 0x0,
  629. [ C(RESULT_MISS) ] = 0,
  630. },
  631. },
  632. [ C(L1I ) ] = {
  633. [ C(OP_READ) ] = {
  634. [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
  635. [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
  636. },
  637. [ C(OP_WRITE) ] = {
  638. [ C(RESULT_ACCESS) ] = -1,
  639. [ C(RESULT_MISS) ] = -1,
  640. },
  641. [ C(OP_PREFETCH) ] = {
  642. [ C(RESULT_ACCESS) ] = 0,
  643. [ C(RESULT_MISS) ] = 0,
  644. },
  645. },
  646. [ C(LL ) ] = {
  647. [ C(OP_READ) ] = {
  648. [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
  649. [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
  650. },
  651. [ C(OP_WRITE) ] = {
  652. [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
  653. [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
  654. },
  655. [ C(OP_PREFETCH) ] = {
  656. [ C(RESULT_ACCESS) ] = 0,
  657. [ C(RESULT_MISS) ] = 0,
  658. },
  659. },
  660. [ C(DTLB) ] = {
  661. [ C(OP_READ) ] = {
  662. [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
  663. [ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */
  664. },
  665. [ C(OP_WRITE) ] = {
  666. [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
  667. [ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */
  668. },
  669. [ C(OP_PREFETCH) ] = {
  670. [ C(RESULT_ACCESS) ] = 0,
  671. [ C(RESULT_MISS) ] = 0,
  672. },
  673. },
  674. [ C(ITLB) ] = {
  675. [ C(OP_READ) ] = {
  676. [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
  677. [ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
  678. },
  679. [ C(OP_WRITE) ] = {
  680. [ C(RESULT_ACCESS) ] = -1,
  681. [ C(RESULT_MISS) ] = -1,
  682. },
  683. [ C(OP_PREFETCH) ] = {
  684. [ C(RESULT_ACCESS) ] = -1,
  685. [ C(RESULT_MISS) ] = -1,
  686. },
  687. },
  688. [ C(BPU ) ] = {
  689. [ C(OP_READ) ] = {
  690. [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
691. [ C(RESULT_MISS) ] = 0x00c5, /* BR_INST_RETIRED.MISPRED */
  692. },
  693. [ C(OP_WRITE) ] = {
  694. [ C(RESULT_ACCESS) ] = -1,
  695. [ C(RESULT_MISS) ] = -1,
  696. },
  697. [ C(OP_PREFETCH) ] = {
  698. [ C(RESULT_ACCESS) ] = -1,
  699. [ C(RESULT_MISS) ] = -1,
  700. },
  701. },
  702. };
  703. static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event)
  704. {
  705. /* user explicitly requested branch sampling */
  706. if (has_branch_stack(event))
  707. return true;
  708. /* implicit branch sampling to correct PEBS skid */
  709. if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1)
  710. return true;
  711. return false;
  712. }
  713. static void intel_pmu_disable_all(void)
  714. {
  715. struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
  716. wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
  717. if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
  718. intel_pmu_disable_bts();
  719. intel_pmu_pebs_disable_all();
  720. intel_pmu_lbr_disable_all();
  721. }
  722. static void intel_pmu_enable_all(int added)
  723. {
  724. struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
  725. intel_pmu_pebs_enable_all();
  726. intel_pmu_lbr_enable_all();
  727. wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
  728. x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
  729. if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
  730. struct perf_event *event =
  731. cpuc->events[X86_PMC_IDX_FIXED_BTS];
  732. if (WARN_ON_ONCE(!event))
  733. return;
  734. intel_pmu_enable_bts(event->hw.config);
  735. }
  736. }
  737. /*
  738. * Workaround for:
  739. * Intel Errata AAK100 (model 26)
  740. * Intel Errata AAP53 (model 30)
  741. * Intel Errata BD53 (model 44)
  742. *
  743. * The official story:
  744. * These chips need to be 'reset' when adding counters by programming the
  745. * magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
  746. * in sequence on the same PMC or on different PMCs.
  747. *
748. * In practice it appears some of these events do in fact count, and
749. * we need to program all 4 events.
  750. */
  751. static void intel_pmu_nhm_workaround(void)
  752. {
  753. struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
  754. static const unsigned long nhm_magic[4] = {
  755. 0x4300B5,
  756. 0x4300D2,
  757. 0x4300B1,
  758. 0x4300B1
  759. };
  760. struct perf_event *event;
  761. int i;
  762. /*
763. * The erratum requires the following steps:
764. * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
765. * 2) Configure 4 PERFEVTSELx with the magic events and clear
766. * the corresponding PMCx;
767. * 3) Set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
768. * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
769. * 5) Clear the 4 pairs of PERFEVTSELx and PMCx;
770. */
771. /*
772. * The steps we actually take differ slightly from the above:
773. * A) To reduce MSR operations, we skip step 1); those registers
774. * are already cleared before this function is called;
775. * B) Call x86_perf_event_update to save PMCx before configuring
776. * PERFEVTSELx with the magic number;
777. * C) For step 5), we clear a PERFEVTSELx only when it is not
778. * currently in use.
779. * D) Call x86_perf_event_set_period to restore PMCx;
  780. */
  781. /* We always operate 4 pairs of PERF Counters */
  782. for (i = 0; i < 4; i++) {
  783. event = cpuc->events[i];
  784. if (event)
  785. x86_perf_event_update(event);
  786. }
  787. for (i = 0; i < 4; i++) {
  788. wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
  789. wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
  790. }
  791. wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
  792. wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);
  793. for (i = 0; i < 4; i++) {
  794. event = cpuc->events[i];
  795. if (event) {
  796. x86_perf_event_set_period(event);
  797. __x86_pmu_enable_event(&event->hw,
  798. ARCH_PERFMON_EVENTSEL_ENABLE);
  799. } else
  800. wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
  801. }
  802. }
  803. static void intel_pmu_nhm_enable_all(int added)
  804. {
  805. if (added)
  806. intel_pmu_nhm_workaround();
  807. intel_pmu_enable_all(added);
  808. }
  809. static inline u64 intel_pmu_get_status(void)
  810. {
  811. u64 status;
  812. rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
  813. return status;
  814. }
  815. static inline void intel_pmu_ack_status(u64 ack)
  816. {
  817. wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
  818. }
  819. static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
  820. {
  821. int idx = hwc->idx - X86_PMC_IDX_FIXED;
  822. u64 ctrl_val, mask;
  823. mask = 0xfULL << (idx * 4);
  824. rdmsrl(hwc->config_base, ctrl_val);
  825. ctrl_val &= ~mask;
  826. wrmsrl(hwc->config_base, ctrl_val);
  827. }
  828. static void intel_pmu_disable_event(struct perf_event *event)
  829. {
  830. struct hw_perf_event *hwc = &event->hw;
  831. struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
  832. if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
  833. intel_pmu_disable_bts();
  834. intel_pmu_drain_bts_buffer();
  835. return;
  836. }
  837. cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx);
  838. cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
  839. /*
840. * LBR must be disabled before the actual event,
841. * because any event may be combined with LBR
  842. */
  843. if (intel_pmu_needs_lbr_smpl(event))
  844. intel_pmu_lbr_disable(event);
  845. if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
  846. intel_pmu_disable_fixed(hwc);
  847. return;
  848. }
  849. x86_pmu_disable_event(event);
  850. if (unlikely(event->attr.precise_ip))
  851. intel_pmu_pebs_disable(event);
  852. }
  853. static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
  854. {
  855. int idx = hwc->idx - X86_PMC_IDX_FIXED;
  856. u64 ctrl_val, bits, mask;
  857. /*
  858. * Enable IRQ generation (0x8),
  859. * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
  860. * if requested:
  861. */
  862. bits = 0x8ULL;
  863. if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
  864. bits |= 0x2;
  865. if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
  866. bits |= 0x1;
  867. /*
  868. * ANY bit is supported in v3 and up
  869. */
  870. if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
  871. bits |= 0x4;
  872. bits <<= (idx * 4);
  873. mask = 0xfULL << (idx * 4);
  874. rdmsrl(hwc->config_base, ctrl_val);
  875. ctrl_val &= ~mask;
  876. ctrl_val |= bits;
  877. wrmsrl(hwc->config_base, ctrl_val);
  878. }
  879. static void intel_pmu_enable_event(struct perf_event *event)
  880. {
  881. struct hw_perf_event *hwc = &event->hw;
  882. struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
  883. if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
  884. if (!__this_cpu_read(cpu_hw_events.enabled))
  885. return;
  886. intel_pmu_enable_bts(hwc->config);
  887. return;
  888. }
  889. /*
890. * LBR must be enabled before the actual event,
891. * because any event may be combined with LBR
  892. */
  893. if (intel_pmu_needs_lbr_smpl(event))
  894. intel_pmu_lbr_enable(event);
  895. if (event->attr.exclude_host)
  896. cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx);
  897. if (event->attr.exclude_guest)
  898. cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx);
  899. if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
  900. intel_pmu_enable_fixed(hwc);
  901. return;
  902. }
  903. if (unlikely(event->attr.precise_ip))
  904. intel_pmu_pebs_enable(event);
  905. __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
  906. }
  907. /*
908. * Save and restart an expired event. Called from NMI context,
  909. * so it has to be careful about preempting normal event ops:
  910. */
  911. int intel_pmu_save_and_restart(struct perf_event *event)
  912. {
  913. x86_perf_event_update(event);
  914. return x86_perf_event_set_period(event);
  915. }
  916. static void intel_pmu_reset(void)
  917. {
  918. struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
  919. unsigned long flags;
  920. int idx;
  921. if (!x86_pmu.num_counters)
  922. return;
  923. local_irq_save(flags);
  924. printk("clearing PMU state on CPU#%d\n", smp_processor_id());
  925. for (idx = 0; idx < x86_pmu.num_counters; idx++) {
  926. checking_wrmsrl(x86_pmu_config_addr(idx), 0ull);
  927. checking_wrmsrl(x86_pmu_event_addr(idx), 0ull);
  928. }
  929. for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
  930. checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
  931. if (ds)
  932. ds->bts_index = ds->bts_buffer_base;
  933. local_irq_restore(flags);
  934. }
  935. /*
  936. * This handler is triggered by the local APIC, so the APIC IRQ handling
  937. * rules apply:
  938. */
  939. static int intel_pmu_handle_irq(struct pt_regs *regs)
  940. {
  941. struct perf_sample_data data;
  942. struct cpu_hw_events *cpuc;
  943. int bit, loops;
  944. u64 status;
  945. int handled;
  946. perf_sample_data_init(&data, 0);
  947. cpuc = &__get_cpu_var(cpu_hw_events);
  948. /*
  949. * Some chipsets need to unmask the LVTPC in a particular spot
  950. * inside the nmi handler. As a result, the unmasking was pushed
  951. * into all the nmi handlers.
  952. *
  953. * This handler doesn't seem to have any issues with the unmasking
  954. * so it was left at the top.
  955. */
  956. apic_write(APIC_LVTPC, APIC_DM_NMI);
  957. intel_pmu_disable_all();
  958. handled = intel_pmu_drain_bts_buffer();
  959. status = intel_pmu_get_status();
  960. if (!status) {
  961. intel_pmu_enable_all(0);
  962. return handled;
  963. }
  964. loops = 0;
  965. again:
  966. intel_pmu_ack_status(status);
  967. if (++loops > 100) {
  968. WARN_ONCE(1, "perfevents: irq loop stuck!\n");
  969. perf_event_print_debug();
  970. intel_pmu_reset();
  971. goto done;
  972. }
  973. inc_irq_stat(apic_perf_irqs);
  974. intel_pmu_lbr_read();
  975. /*
  976. * PEBS overflow sets bit 62 in the global status register
  977. */
  978. if (__test_and_clear_bit(62, (unsigned long *)&status)) {
  979. handled++;
  980. x86_pmu.drain_pebs(regs);
  981. }
  982. for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
  983. struct perf_event *event = cpuc->events[bit];
  984. handled++;
  985. if (!test_bit(bit, cpuc->active_mask))
  986. continue;
  987. if (!intel_pmu_save_and_restart(event))
  988. continue;
  989. data.period = event->hw.last_period;
  990. if (has_branch_stack(event))
  991. data.br_stack = &cpuc->lbr_stack;
  992. if (perf_event_overflow(event, &data, regs))
  993. x86_pmu_stop(event, 0);
  994. }
  995. /*
  996. * Repeat if there is more work to be done:
  997. */
  998. status = intel_pmu_get_status();
  999. if (status)
  1000. goto again;
  1001. done:
  1002. intel_pmu_enable_all(0);
  1003. return handled;
  1004. }
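/*
 * BTS handles the case of sampling the branch-instructions event with a
 * period of 1 (and no frequency-based sampling): such events are steered
 * onto the BTS fake counter via bts_constraint.
 */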
  1005. static struct event_constraint *
  1006. intel_bts_constraints(struct perf_event *event)
  1007. {
  1008. struct hw_perf_event *hwc = &event->hw;
  1009. unsigned int hw_event, bts_event;
  1010. if (event->attr.freq)
  1011. return NULL;
  1012. hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
  1013. bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
  1014. if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
  1015. return &bts_constraint;
  1016. return NULL;
  1017. }
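/*
 * Offcore response events come in two encodings: 0x01b7 pairs with
 * MSR_OFFCORE_RSP_0 and 0x01bb pairs with MSR_OFFCORE_RSP_1. When the
 * requested MSR is already claimed with a different value, retry with
 * the sibling encoding, provided the part has RSP_1 (ERF_HAS_RSP_1).
 */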
  1018. static bool intel_try_alt_er(struct perf_event *event, int orig_idx)
  1019. {
  1020. if (!(x86_pmu.er_flags & ERF_HAS_RSP_1))
  1021. return false;
  1022. if (event->hw.extra_reg.idx == EXTRA_REG_RSP_0) {
  1023. event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
  1024. event->hw.config |= 0x01bb;
  1025. event->hw.extra_reg.idx = EXTRA_REG_RSP_1;
  1026. event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
  1027. } else if (event->hw.extra_reg.idx == EXTRA_REG_RSP_1) {
  1028. event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
  1029. event->hw.config |= 0x01b7;
  1030. event->hw.extra_reg.idx = EXTRA_REG_RSP_0;
  1031. event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
  1032. }
  1033. if (event->hw.extra_reg.idx == orig_idx)
  1034. return false;
  1035. return true;
  1036. }
  1037. /*
  1038. * manage allocation of shared extra msr for certain events
  1039. *
  1040. * sharing can be:
  1041. * per-cpu: to be shared between the various events on a single PMU
  1042. * per-core: per-cpu + shared by HT threads
  1043. */
  1044. static struct event_constraint *
  1045. __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
  1046. struct perf_event *event,
  1047. struct hw_perf_event_extra *reg)
  1048. {
  1049. struct event_constraint *c = &emptyconstraint;
  1050. struct er_account *era;
  1051. unsigned long flags;
  1052. int orig_idx = reg->idx;
  1053. /* already allocated shared msr */
  1054. if (reg->alloc)
  1055. return NULL; /* call x86_get_event_constraint() */
  1056. again:
  1057. era = &cpuc->shared_regs->regs[reg->idx];
  1058. /*
1059. * we use raw_spin_lock_irqsave() to avoid lockdep issues when
1060. * passing a fake cpuc
  1061. */
  1062. raw_spin_lock_irqsave(&era->lock, flags);
  1063. if (!atomic_read(&era->ref) || era->config == reg->config) {
  1064. /* lock in msr value */
  1065. era->config = reg->config;
  1066. era->reg = reg->reg;
  1067. /* one more user */
  1068. atomic_inc(&era->ref);
  1069. /* no need to reallocate during incremental event scheduling */
  1070. reg->alloc = 1;
  1071. /*
  1072. * need to call x86_get_event_constraint()
  1073. * to check if associated event has constraints
  1074. */
  1075. c = NULL;
  1076. } else if (intel_try_alt_er(event, orig_idx)) {
  1077. raw_spin_unlock_irqrestore(&era->lock, flags);
  1078. goto again;
  1079. }
  1080. raw_spin_unlock_irqrestore(&era->lock, flags);
  1081. return c;
  1082. }
  1083. static void
  1084. __intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
  1085. struct hw_perf_event_extra *reg)
  1086. {
  1087. struct er_account *era;
  1088. /*
1089. * only put the constraint if the extra reg was actually
1090. * allocated. This also takes care of events which do
1091. * not use an extra shared reg.
  1092. */
  1093. if (!reg->alloc)
  1094. return;
  1095. era = &cpuc->shared_regs->regs[reg->idx];
  1096. /* one fewer user */
  1097. atomic_dec(&era->ref);
  1098. /* allocate again next time */
  1099. reg->alloc = 0;
  1100. }
  1101. static struct event_constraint *
  1102. intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
  1103. struct perf_event *event)
  1104. {
  1105. struct event_constraint *c = NULL, *d;
  1106. struct hw_perf_event_extra *xreg, *breg;
  1107. xreg = &event->hw.extra_reg;
  1108. if (xreg->idx != EXTRA_REG_NONE) {
  1109. c = __intel_shared_reg_get_constraints(cpuc, event, xreg);
  1110. if (c == &emptyconstraint)
  1111. return c;
  1112. }
  1113. breg = &event->hw.branch_reg;
  1114. if (breg->idx != EXTRA_REG_NONE) {
  1115. d = __intel_shared_reg_get_constraints(cpuc, event, breg);
  1116. if (d == &emptyconstraint) {
  1117. __intel_shared_reg_put_constraints(cpuc, xreg);
  1118. c = d;
  1119. }
  1120. }
  1121. return c;
  1122. }
  1123. struct event_constraint *
  1124. x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
  1125. {
  1126. struct event_constraint *c;
  1127. if (x86_pmu.event_constraints) {
  1128. for_each_event_constraint(c, x86_pmu.event_constraints) {
  1129. if ((event->hw.config & c->cmask) == c->code)
  1130. return c;
  1131. }
  1132. }
  1133. return &unconstrained;
  1134. }
  1135. static struct event_constraint *
  1136. intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
  1137. {
  1138. struct event_constraint *c;
  1139. c = intel_bts_constraints(event);
  1140. if (c)
  1141. return c;
  1142. c = intel_pebs_constraints(event);
  1143. if (c)
  1144. return c;
  1145. c = intel_shared_regs_constraints(cpuc, event);
  1146. if (c)
  1147. return c;
  1148. return x86_get_event_constraints(cpuc, event);
  1149. }
  1150. static void
  1151. intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
  1152. struct perf_event *event)
  1153. {
  1154. struct hw_perf_event_extra *reg;
  1155. reg = &event->hw.extra_reg;
  1156. if (reg->idx != EXTRA_REG_NONE)
  1157. __intel_shared_reg_put_constraints(cpuc, reg);
  1158. reg = &event->hw.branch_reg;
  1159. if (reg->idx != EXTRA_REG_NONE)
  1160. __intel_shared_reg_put_constraints(cpuc, reg);
  1161. }
  1162. static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
  1163. struct perf_event *event)
  1164. {
  1165. intel_put_shared_regs_event_constraints(cpuc, event);
  1166. }
  1167. static int intel_pmu_hw_config(struct perf_event *event)
  1168. {
  1169. int ret = x86_pmu_hw_config(event);
  1170. if (ret)
  1171. return ret;
  1172. if (event->attr.precise_ip &&
  1173. (event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
  1174. /*
  1175. * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
  1176. * (0x003c) so that we can use it with PEBS.
  1177. *
  1178. * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
  1179. * PEBS capable. However we can use INST_RETIRED.ANY_P
  1180. * (0x00c0), which is a PEBS capable event, to get the same
  1181. * count.
  1182. *
1183. * With a CNTMASK, INST_RETIRED.ANY_P counts the cycles that
1184. * retire at least CNTMASK instructions. By setting CNTMASK
1185. * to a value (16) larger than the maximum number of
1186. * instructions that can be retired per cycle (4) and then
1187. * inverting the condition, we count all cycles that retire
1188. * fewer than 16 instructions, which is every cycle.
  1189. *
  1190. * Thereby we gain a PEBS capable cycle counter.
  1191. */
  1192. u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);
  1193. alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
  1194. event->hw.config = alt_config;
  1195. }
  1196. if (intel_pmu_needs_lbr_smpl(event)) {
  1197. ret = intel_pmu_setup_lbr_filter(event);
  1198. if (ret)
  1199. return ret;
  1200. }
  1201. if (event->attr.type != PERF_TYPE_RAW)
  1202. return 0;
  1203. if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
  1204. return 0;
  1205. if (x86_pmu.version < 3)
  1206. return -EINVAL;
  1207. if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
  1208. return -EACCES;
  1209. event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;
  1210. return 0;
  1211. }
  1212. struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
  1213. {
  1214. if (x86_pmu.guest_get_msrs)
  1215. return x86_pmu.guest_get_msrs(nr);
  1216. *nr = 0;
  1217. return NULL;
  1218. }
  1219. EXPORT_SYMBOL_GPL(perf_guest_get_msrs);
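/*
 * On PMUs with a global control MSR, guest/host switching only swaps
 * MSR_CORE_PERF_GLOBAL_CTRL: the value loaded in the host masks out
 * exclude_host counters, the value loaded in the guest masks out
 * exclude_guest counters.
 */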
  1220. static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
  1221. {
  1222. struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
  1223. struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
  1224. arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
  1225. arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
  1226. arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;
  1227. *nr = 1;
  1228. return arr;
  1229. }
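/*
 * For the core PMU, each counter's EVENTSEL is switched individually:
 * the enable bit is dropped from the host copy for exclude_host events
 * and from the guest copy for exclude_guest events.
 */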
  1230. static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr)
  1231. {
  1232. struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
  1233. struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
  1234. int idx;
  1235. for (idx = 0; idx < x86_pmu.num_counters; idx++) {
  1236. struct perf_event *event = cpuc->events[idx];
  1237. arr[idx].msr = x86_pmu_config_addr(idx);
  1238. arr[idx].host = arr[idx].guest = 0;
  1239. if (!test_bit(idx, cpuc->active_mask))
  1240. continue;
  1241. arr[idx].host = arr[idx].guest =
  1242. event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;
  1243. if (event->attr.exclude_host)
  1244. arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
  1245. else if (event->attr.exclude_guest)
  1246. arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
  1247. }
  1248. *nr = x86_pmu.num_counters;
  1249. return arr;
  1250. }
  1251. static void core_pmu_enable_event(struct perf_event *event)
  1252. {
  1253. if (!event->attr.exclude_host)
  1254. x86_pmu_enable_event(event);
  1255. }
  1256. static void core_pmu_enable_all(int added)
  1257. {
  1258. struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
  1259. int idx;
  1260. for (idx = 0; idx < x86_pmu.num_counters; idx++) {
  1261. struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
  1262. if (!test_bit(idx, cpuc->active_mask) ||
  1263. cpuc->events[idx]->attr.exclude_host)
  1264. continue;
  1265. __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
  1266. }
  1267. }
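/*
 * sysfs format attributes: describe how the raw event config bits map
 * onto the architectural PERFEVTSEL fields (event select, unit mask,
 * edge detect, pin control, any-thread, invert, counter mask).
 */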
  1268. PMU_FORMAT_ATTR(event, "config:0-7" );
  1269. PMU_FORMAT_ATTR(umask, "config:8-15" );
  1270. PMU_FORMAT_ATTR(edge, "config:18" );
  1271. PMU_FORMAT_ATTR(pc, "config:19" );
  1272. PMU_FORMAT_ATTR(any, "config:21" ); /* v3 + */
  1273. PMU_FORMAT_ATTR(inv, "config:23" );
  1274. PMU_FORMAT_ATTR(cmask, "config:24-31" );
  1275. static struct attribute *intel_arch_formats_attr[] = {
  1276. &format_attr_event.attr,
  1277. &format_attr_umask.attr,
  1278. &format_attr_edge.attr,
  1279. &format_attr_pc.attr,
  1280. &format_attr_inv.attr,
  1281. &format_attr_cmask.attr,
  1282. NULL,
  1283. };
  1284. static __initconst const struct x86_pmu core_pmu = {
  1285. .name = "core",
  1286. .handle_irq = x86_pmu_handle_irq,
  1287. .disable_all = x86_pmu_disable_all,
  1288. .enable_all = core_pmu_enable_all,
  1289. .enable = core_pmu_enable_event,
  1290. .disable = x86_pmu_disable_event,
  1291. .hw_config = x86_pmu_hw_config,
  1292. .schedule_events = x86_schedule_events,
  1293. .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
  1294. .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
  1295. .event_map = intel_pmu_event_map,
  1296. .max_events = ARRAY_SIZE(intel_perfmon_event_map),
  1297. .apic = 1,
  1298. /*
  1299. * Intel PMCs cannot be accessed sanely above 32 bit width,
  1300. * so we install an artificial 1<<31 period regardless of
  1301. * the generic event period:
  1302. */
  1303. .max_period = (1ULL << 31) - 1,
  1304. .get_event_constraints = intel_get_event_constraints,
  1305. .put_event_constraints = intel_put_event_constraints,
  1306. .event_constraints = intel_core_event_constraints,
  1307. .guest_get_msrs = core_guest_get_msrs,
  1308. .format_attrs = intel_arch_formats_attr,
  1309. };
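/*
 * One intel_shared_regs instance is allocated per CPU; when HT sharing
 * is allowed, intel_pmu_cpu_starting() points both siblings of a core at
 * the same instance and queues the spare copy for freeing.
 */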
  1310. struct intel_shared_regs *allocate_shared_regs(int cpu)
  1311. {
  1312. struct intel_shared_regs *regs;
  1313. int i;
  1314. regs = kzalloc_node(sizeof(struct intel_shared_regs),
  1315. GFP_KERNEL, cpu_to_node(cpu));
  1316. if (regs) {
  1317. /*
  1318. * initialize the locks to keep lockdep happy
  1319. */
  1320. for (i = 0; i < EXTRA_REG_MAX; i++)
  1321. raw_spin_lock_init(&regs->regs[i].lock);
  1322. regs->core_id = -1;
  1323. }
  1324. return regs;
  1325. }
  1326. static int intel_pmu_cpu_prepare(int cpu)
  1327. {
  1328. struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
  1329. if (!(x86_pmu.extra_regs || x86_pmu.lbr_sel_map))
  1330. return NOTIFY_OK;
  1331. cpuc->shared_regs = allocate_shared_regs(cpu);
  1332. if (!cpuc->shared_regs)
  1333. return NOTIFY_BAD;
  1334. return NOTIFY_OK;
  1335. }
  1336. static void intel_pmu_cpu_starting(int cpu)
  1337. {
  1338. struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
  1339. int core_id = topology_core_id(cpu);
  1340. int i;
  1341. init_debug_store_on_cpu(cpu);
  1342. /*
  1343. * Deal with CPUs that don't clear their LBRs on power-up.
  1344. */
  1345. intel_pmu_lbr_reset();
  1346. cpuc->lbr_sel = NULL;
  1347. if (!cpuc->shared_regs)
  1348. return;
  1349. if (!(x86_pmu.er_flags & ERF_NO_HT_SHARING)) {
  1350. for_each_cpu(i, topology_thread_cpumask(cpu)) {
  1351. struct intel_shared_regs *pc;
  1352. pc = per_cpu(cpu_hw_events, i).shared_regs;
  1353. if (pc && pc->core_id == core_id) {
  1354. cpuc->kfree_on_online = cpuc->shared_regs;
  1355. cpuc->shared_regs = pc;
  1356. break;
  1357. }
  1358. }
  1359. cpuc->shared_regs->core_id = core_id;
  1360. cpuc->shared_regs->refcnt++;
  1361. }
  1362. if (x86_pmu.lbr_sel_map)
  1363. cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];
  1364. }
  1365. static void intel_pmu_cpu_dying(int cpu)
  1366. {
  1367. struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
  1368. struct intel_shared_regs *pc;
  1369. pc = cpuc->shared_regs;
  1370. if (pc) {
  1371. if (pc->core_id == -1 || --pc->refcnt == 0)
  1372. kfree(pc);
  1373. cpuc->shared_regs = NULL;
  1374. }
  1375. fini_debug_store_on_cpu(cpu);
  1376. }
  1377. static void intel_pmu_flush_branch_stack(void)
  1378. {
  1379. /*
1380. * The Intel LBR does not tag entries with the PID of the
1381. * current task, so we need to flush it on context switch
1382. * (ctxsw).
1383. * For now, we simply reset it.
  1384. */
  1385. if (x86_pmu.lbr_nr)
  1386. intel_pmu_lbr_reset();
  1387. }
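/*
 * config1 carries the raw value written to the shared OFFCORE_RSP MSR
 * for offcore response events (see the extra_regs tables above).
 */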
  1388. PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
  1389. static struct attribute *intel_arch3_formats_attr[] = {
  1390. &format_attr_event.attr,
  1391. &format_attr_umask.attr,
  1392. &format_attr_edge.attr,
  1393. &format_attr_pc.attr,
  1394. &format_attr_any.attr,
  1395. &format_attr_inv.attr,
  1396. &format_attr_cmask.attr,
  1397. &format_attr_offcore_rsp.attr, /* XXX do NHM/WSM + SNB breakout */
  1398. NULL,
  1399. };
  1400. static __initconst const struct x86_pmu intel_pmu = {
  1401. .name = "Intel",
  1402. .handle_irq = intel_pmu_handle_irq,
  1403. .disable_all = intel_pmu_disable_all,
  1404. .enable_all = intel_pmu_enable_all,
  1405. .enable = intel_pmu_enable_event,
  1406. .disable = intel_pmu_disable_event,
  1407. .hw_config = intel_pmu_hw_config,
  1408. .schedule_events = x86_schedule_events,
  1409. .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
  1410. .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
  1411. .event_map = intel_pmu_event_map,
  1412. .max_events = ARRAY_SIZE(intel_perfmon_event_map),
  1413. .apic = 1,
  1414. /*
  1415. * Intel PMCs cannot be accessed sanely above 32 bit width,
  1416. * so we install an artificial 1<<31 period regardless of
  1417. * the generic event period:
  1418. */
  1419. .max_period = (1ULL << 31) - 1,
  1420. .get_event_constraints = intel_get_event_constraints,
  1421. .put_event_constraints = intel_put_event_constraints,
  1422. .format_attrs = intel_arch3_formats_attr,
  1423. .cpu_prepare = intel_pmu_cpu_prepare,
  1424. .cpu_starting = intel_pmu_cpu_starting,
  1425. .cpu_dying = intel_pmu_cpu_dying,
  1426. .guest_get_msrs = intel_guest_get_msrs,
  1427. .flush_branch_stack = intel_pmu_flush_branch_stack,
  1428. };

static __init void intel_clovertown_quirk(void)
{
        /*
         * PEBS is unreliable due to:
         *
         *   AJ67  - PEBS may experience CPL leaks
         *   AJ68  - PEBS PMI may be delayed by one event
         *   AJ69  - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
         *   AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
         *
         * AJ67 could be worked around by restricting the OS/USR flags.
         * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
         *
         * AJ106 could possibly be worked around by not allowing LBR
         *   usage from PEBS, including the fixup.
         * AJ68 could possibly be worked around by always programming
         *   a pebs_event_reset[0] value and coping with the lost events.
         *
         * But taken together it might just make sense to not enable PEBS on
         * these chips.
         */
        printk(KERN_WARNING "PEBS disabled due to CPU errata.\n");
        x86_pmu.pebs = 0;
        x86_pmu.pebs_constraints = NULL;
}

static __init void intel_sandybridge_quirk(void)
{
        printk(KERN_WARNING "PEBS disabled due to CPU errata.\n");
        x86_pmu.pebs = 0;
        x86_pmu.pebs_constraints = NULL;
}

static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
        { PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" },
        { PERF_COUNT_HW_INSTRUCTIONS, "instructions" },
        { PERF_COUNT_HW_BUS_CYCLES, "bus cycles" },
        { PERF_COUNT_HW_CACHE_REFERENCES, "cache references" },
        { PERF_COUNT_HW_CACHE_MISSES, "cache misses" },
        { PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" },
        { PERF_COUNT_HW_BRANCH_MISSES, "branch misses" },
};

static __init void intel_arch_events_quirk(void)
{
        int bit;

        /* disable events that CPUID reports as not present */
        for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
                intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
                printk(KERN_WARNING "CPUID marked event: \'%s\' unavailable\n",
                       intel_arch_events_map[bit].name);
        }
}
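
/*
 * Worked example (hypothetical CPUID values): in leaf 0xA, a *set* bit
 * in EBX means the corresponding architectural event is NOT available.
 * If a CPU reported EBX = 0x40 (bit 6 set), the loop above would zero
 * intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] and log:
 *
 *   CPUID marked event: 'branch misses' unavailable
 */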

static __init void intel_nehalem_quirk(void)
{
        union cpuid10_ebx ebx;

        ebx.full = x86_pmu.events_maskl;
        if (ebx.split.no_branch_misses_retired) {
                /*
                 * Erratum AAJ80 detected, we work around it by using
                 * the BR_MISP_EXEC.ANY event. This will over-count
                 * branch-misses, but it's still much better than the
                 * architectural event which is often completely bogus:
                 */
                intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
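                /*
                 * Encoding sketch: the raw value packs the umask into
                 * bits 8-15 and the event select into bits 0-7, so
                 * 0x7f89 == (0x7f << 8) | 0x89, i.e. event 0x89
                 * (BR_MISP_EXEC) with umask 0x7f (ANY).
                 */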
                ebx.split.no_branch_misses_retired = 0;
                x86_pmu.events_maskl = ebx.full;
                printk(KERN_INFO "CPU erratum AAJ80 worked around\n");
        }
}

__init int intel_pmu_init(void)
{
        union cpuid10_edx edx;
        union cpuid10_eax eax;
        union cpuid10_ebx ebx;
        unsigned int unused;
        int version;

        if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
                switch (boot_cpu_data.x86) {
                case 0x6:
                        return p6_pmu_init();
                case 0xf:
                        return p4_pmu_init();
                }
                return -ENODEV;
        }

        /*
         * Check whether the Architectural PerfMon supports
         * Branch Misses Retired hw_event or not.
         */
        cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);
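        /*
         * Layout sketch of CPUID leaf 0xA (per the SDM; the unions above
         * just give these bitfields names):
         *
         *   EAX: bits  0-7  version_id
         *        bits  8-15 num_counters (general purpose)
         *        bits 16-23 bit_width of those counters
         *        bits 24-31 mask_length (number of valid EBX bits)
         *   EBX: one bit per architectural event; set == not available
         *   EDX: bits  0-4  num_counters_fixed
         *        bits  5-12 bit_width_fixed
         */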

        if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
                return -ENODEV;

        version = eax.split.version_id;
        if (version < 2)
                x86_pmu = core_pmu;
        else
                x86_pmu = intel_pmu;

        x86_pmu.version         = version;
        x86_pmu.num_counters    = eax.split.num_counters;
        x86_pmu.cntval_bits     = eax.split.bit_width;
        x86_pmu.cntval_mask     = (1ULL << eax.split.bit_width) - 1;
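        /*
         * Worked example: a CPU reporting bit_width = 48 ends up with
         * cntval_mask = (1ULL << 48) - 1 = 0x0000ffffffffffff, the mask
         * applied whenever a counter value is read back or reprogrammed.
         */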
        x86_pmu.events_maskl    = ebx.full;
        x86_pmu.events_mask_len = eax.split.mask_length;

        /*
         * Quirk: v2 perfmon does not report fixed-purpose events, so
         * assume at least 3 events:
         */
        if (version > 1)
                x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
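        /*
         * The three assumed counters match the architectural fixed events
         * (a detail from the SDM, not spelled out here): INST_RETIRED.ANY,
         * CPU_CLK_UNHALTED.CORE and CPU_CLK_UNHALTED.REF.
         */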

        /*
         * v2 and above have a perf capabilities MSR
         */
        if (version > 1) {
                u64 capabilities;

                rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
                x86_pmu.intel_cap.capabilities = capabilities;
        }

        intel_ds_init();

        x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */

        /*
         * Install the hw-cache-events table:
         */
        switch (boot_cpu_data.x86_model) {
        case 14: /* 65 nm core solo/duo, "Yonah" */
                pr_cont("Core events, ");
                break;

        case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
                x86_add_quirk(intel_clovertown_quirk);
                /* fall through */
        case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
        case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
        case 29: /* six-core 45 nm xeon "Dunnington" */
                memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));

                intel_pmu_lbr_init_core();

                x86_pmu.event_constraints = intel_core2_event_constraints;
                x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
                pr_cont("Core2 events, ");
                break;

        case 26: /* 45 nm nehalem, "Bloomfield" */
        case 30: /* 45 nm nehalem, "Lynnfield" */
        case 46: /* 45 nm nehalem-ex, "Beckton" */
                memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
                memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
                       sizeof(hw_cache_extra_regs));

                intel_pmu_lbr_init_nhm();

                x86_pmu.event_constraints = intel_nehalem_event_constraints;
                x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
                x86_pmu.enable_all = intel_pmu_nhm_enable_all;
                x86_pmu.extra_regs = intel_nehalem_extra_regs;

                /* UOPS_ISSUED.STALLED_CYCLES */
                intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
                        X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
                /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
                intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
                        X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
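                /*
                 * Expansion sketch for the X86_CONFIG() values above,
                 * assuming the standard PERFEVTSEL layout (event bits 0-7,
                 * umask 8-15, inv bit 23, cmask 24-31):
                 *
                 *   X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1)
                 *     == 0x0e | (0x01 << 8) | (1 << 23) | (1 << 24)
                 *     == 0x0180010e
                 *
                 * i.e. count cycles in which fewer than one uop is issued,
                 * which is exactly a stall-cycle count.
                 */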

                x86_add_quirk(intel_nehalem_quirk);

                pr_cont("Nehalem events, ");
                break;

        case 28: /* Atom */
                memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));

                intel_pmu_lbr_init_atom();

                x86_pmu.event_constraints = intel_gen_event_constraints;
                x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
                pr_cont("Atom events, ");
                break;

        case 37: /* 32 nm nehalem, "Clarkdale" */
        case 44: /* 32 nm nehalem, "Gulftown" */
        case 47: /* 32 nm Xeon E7 */
                memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
                memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
                       sizeof(hw_cache_extra_regs));

                intel_pmu_lbr_init_nhm();

                x86_pmu.event_constraints = intel_westmere_event_constraints;
                x86_pmu.enable_all = intel_pmu_nhm_enable_all;
                x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
                x86_pmu.extra_regs = intel_westmere_extra_regs;
                x86_pmu.er_flags |= ERF_HAS_RSP_1;

                /* UOPS_ISSUED.STALLED_CYCLES */
                intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
                        X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
                /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
                intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
                        X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);

                pr_cont("Westmere events, ");
                break;

        case 42: /* SandyBridge */
                x86_add_quirk(intel_sandybridge_quirk);
                /* fall through */
        case 45: /* SandyBridge, "Romley-EP" */
                memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));

                intel_pmu_lbr_init_snb();

                x86_pmu.event_constraints = intel_snb_event_constraints;
                x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
                x86_pmu.extra_regs = intel_snb_extra_regs;
                /* all extra regs are per-cpu when HT is on */
                x86_pmu.er_flags |= ERF_HAS_RSP_1;
                x86_pmu.er_flags |= ERF_NO_HT_SHARING;

                /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
                intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
                        X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
                /* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles */
                intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
                        X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);

                pr_cont("SandyBridge events, ");
                break;

        default:
                switch (x86_pmu.version) {
                case 1:
                        x86_pmu.event_constraints = intel_v1_event_constraints;
                        pr_cont("generic architected perfmon v1, ");
                        break;
                default:
                        /*
                         * default constraints for v2 and up
                         */
                        x86_pmu.event_constraints = intel_gen_event_constraints;
                        pr_cont("generic architected perfmon, ");
                        break;
                }
        }

        return 0;
}
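
/*
 * For reference, the pr_cont() fragments above finish a boot line that
 * the generic x86 perf code starts (a sketch from memory of that code,
 * not from this file): it prints "Performance Events: " and, after this
 * function returns, appends the PMU name, so a SandyBridge box would
 * log roughly:
 *
 *   Performance Events: SandyBridge events, Intel PMU driver.
 */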