perf_event.c

/*
 * Performance events x86 architecture code
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 *  Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 *  For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/cpu.h>
#include <linux/bitops.h>

#include <asm/apic.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>

static u64 perf_event_mask __read_mostly;

/* The maximal number of PEBS events: */
#define MAX_PEBS_EVENTS	4

/* The size of a BTS record in bytes: */
#define BTS_RECORD_SIZE	24

/* The size of a per-cpu BTS buffer in bytes: */
#define BTS_BUFFER_SIZE	(BTS_RECORD_SIZE * 2048)

/* The BTS overflow threshold in bytes from the end of the buffer: */
#define BTS_OVFL_TH	(BTS_RECORD_SIZE * 128)

/*
 * Bits in the debugctlmsr controlling branch tracing.
 */
#define X86_DEBUGCTL_TR			(1 << 6)
#define X86_DEBUGCTL_BTS		(1 << 7)
#define X86_DEBUGCTL_BTINT		(1 << 8)
#define X86_DEBUGCTL_BTS_OFF_OS		(1 << 9)
#define X86_DEBUGCTL_BTS_OFF_USR	(1 << 10)

/*
 * A debug store configuration.
 *
 * We only support architectures that use 64-bit fields.
 */
struct debug_store {
	u64	bts_buffer_base;
	u64	bts_index;
	u64	bts_absolute_maximum;
	u64	bts_interrupt_threshold;
	u64	pebs_buffer_base;
	u64	pebs_index;
	u64	pebs_absolute_maximum;
	u64	pebs_interrupt_threshold;
	u64	pebs_event_reset[MAX_PEBS_EVENTS];
};

struct event_constraint {
	union {
		unsigned long	idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
		u64		idxmsk64[1];
	};
	int	code;
	int	cmask;
	int	weight;
};

struct cpu_hw_events {
	struct perf_event	*events[X86_PMC_IDX_MAX]; /* in counter order */
	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		interrupts;
	int			enabled;
	struct debug_store	*ds;

	int			n_events;
	int			n_added;
	int			assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
	u64			tags[X86_PMC_IDX_MAX];
	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */
};

#define __EVENT_CONSTRAINT(c, n, m, w) {\
	{ .idxmsk64[0] = (n) },		\
	.code = (c),			\
	.cmask = (m),			\
	.weight = (w),			\
}

#define EVENT_CONSTRAINT(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n))

#define INTEL_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVTSEL_MASK)

#define FIXED_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_FIXED_MASK)

#define EVENT_CONSTRAINT_END		\
	EVENT_CONSTRAINT(0, 0, 0)

#define for_each_event_constraint(e, c)	\
	for ((e) = (c); (e)->cmask; (e)++)

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
	const char	*name;
	int		version;
	int		(*handle_irq)(struct pt_regs *);
	void		(*disable_all)(void);
	void		(*enable_all)(void);
	void		(*enable)(struct hw_perf_event *, int);
	void		(*disable)(struct hw_perf_event *, int);
	unsigned	eventsel;
	unsigned	perfctr;
	u64		(*event_map)(int);
	u64		(*raw_event)(u64);
	int		max_events;
	int		num_events;
	int		num_events_fixed;
	int		event_bits;
	u64		event_mask;
	int		apic;
	u64		max_period;
	u64		intel_ctrl;
	void		(*enable_bts)(u64 config);
	void		(*disable_bts)(void);

	struct event_constraint *
			(*get_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);

	void		(*put_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);
	struct event_constraint *event_constraints;
};

static struct x86_pmu x86_pmu __read_mostly;

static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
	.enabled = 1,
};

static int x86_perf_event_set_period(struct perf_event *event,
			     struct hw_perf_event *hwc, int idx);

/*
 * Not sure about some of these
 */
static const u64 p6_perfmon_event_map[] =
{
  [PERF_COUNT_HW_CPU_CYCLES]		= 0x0079,
  [PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
  [PERF_COUNT_HW_CACHE_REFERENCES]	= 0x0f2e,
  [PERF_COUNT_HW_CACHE_MISSES]		= 0x012e,
  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
  [PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
  [PERF_COUNT_HW_BUS_CYCLES]		= 0x0062,
};

static u64 p6_pmu_event_map(int hw_event)
{
	return p6_perfmon_event_map[hw_event];
}

/*
 * Event setting that is specified not to count anything.
 * We use this to effectively disable a counter.
 *
 * L2_RQSTS with 0 MESI unit mask.
 */
#define P6_NOP_EVENT			0x0000002EULL

static u64 p6_pmu_raw_event(u64 hw_event)
{
#define P6_EVNTSEL_EVENT_MASK		0x000000FFULL
#define P6_EVNTSEL_UNIT_MASK		0x0000FF00ULL
#define P6_EVNTSEL_EDGE_MASK		0x00040000ULL
#define P6_EVNTSEL_INV_MASK		0x00800000ULL
#define P6_EVNTSEL_REG_MASK		0xFF000000ULL

#define P6_EVNTSEL_MASK			\
	(P6_EVNTSEL_EVENT_MASK |	\
	 P6_EVNTSEL_UNIT_MASK  |	\
	 P6_EVNTSEL_EDGE_MASK  |	\
	 P6_EVNTSEL_INV_MASK   |	\
	 P6_EVNTSEL_REG_MASK)

	return hw_event & P6_EVNTSEL_MASK;
}

static struct event_constraint intel_p6_event_constraints[] =
{
	INTEL_EVENT_CONSTRAINT(0xc1, 0x1),	/* FLOPS */
	INTEL_EVENT_CONSTRAINT(0x10, 0x1),	/* FP_COMP_OPS_EXE */
	INTEL_EVENT_CONSTRAINT(0x11, 0x1),	/* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2),	/* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2),	/* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1),	/* CYCLES_DIV_BUSY */
	EVENT_CONSTRAINT_END
};

/*
 * Intel PerfMon v3. Used on Core2 and later.
 */
static const u64 intel_perfmon_event_map[] =
{
  [PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
  [PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
  [PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e,
  [PERF_COUNT_HW_CACHE_MISSES]		= 0x412e,
  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
  [PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
  [PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
};

static struct event_constraint intel_core_event_constraints[] =
{
	INTEL_EVENT_CONSTRAINT(0x11, 0x2),	/* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2),	/* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2),	/* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1),	/* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2),	/* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xc1, 0x1),	/* FP_COMP_INSTR_RET */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_core2_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
	FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
	INTEL_EVENT_CONSTRAINT(0x10, 0x1),	/* FP_COMP_OPS_EXE */
	INTEL_EVENT_CONSTRAINT(0x11, 0x2),	/* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2),	/* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2),	/* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1),	/* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x18, 0x1),	/* IDLE_DURING_DIV */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2),	/* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xa1, 0x1),	/* RS_UOPS_DISPATCH_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1),	/* MEM_LOAD_RETIRED */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_nehalem_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0xc0, (0xf|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
	FIXED_EVENT_CONSTRAINT(0x3c, (0xf|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
	INTEL_EVENT_CONSTRAINT(0x40, 0x3),	/* L1D_CACHE_LD */
	INTEL_EVENT_CONSTRAINT(0x41, 0x3),	/* L1D_CACHE_ST */
	INTEL_EVENT_CONSTRAINT(0x42, 0x3),	/* L1D_CACHE_LOCK */
	INTEL_EVENT_CONSTRAINT(0x43, 0x3),	/* L1D_ALL_REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x3),	/* L1D_PEND_MISS */
	INTEL_EVENT_CONSTRAINT(0x4e, 0x3),	/* L1D_PREFETCH */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3),	/* L1D */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3),	/* CACHE_LOCK_CYCLES */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_westmere_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0xc0, (0xf|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
	FIXED_EVENT_CONSTRAINT(0x3c, (0xf|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3),	/* L1D */
	INTEL_EVENT_CONSTRAINT(0x60, 0x1),	/* OFFCORE_REQUESTS_OUTSTANDING */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3),	/* CACHE_LOCK_CYCLES */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_gen_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
	FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
	EVENT_CONSTRAINT_END
};

static u64 intel_pmu_event_map(int hw_event)
{
	return intel_perfmon_event_map[hw_event];
}

/*
 * Generalized hw caching related hw_event table, filled
 * in on a per model basis. A value of 0 means
 * 'not supported', -1 means 'hw_event makes no sense on
 * this CPU', any other value means the raw hw_event
 * ID.
 */

#define C(x) PERF_COUNT_HW_CACHE_##x

static u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

static __initconst u64 westmere_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
		[ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
		[ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
		[ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
};

static __initconst u64 nehalem_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
		[ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
		[ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
		[ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
};

static __initconst u64 core2_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
		[ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0208, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
};

static __initconst u64 atom_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
		[ C(RESULT_MISS) ] = 0,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
		[ C(RESULT_MISS) ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
};

static u64 intel_pmu_raw_event(u64 hw_event)
{
#define CORE_EVNTSEL_EVENT_MASK		0x000000FFULL
#define CORE_EVNTSEL_UNIT_MASK		0x0000FF00ULL
#define CORE_EVNTSEL_EDGE_MASK		0x00040000ULL
#define CORE_EVNTSEL_INV_MASK		0x00800000ULL
#define CORE_EVNTSEL_REG_MASK		0xFF000000ULL

#define CORE_EVNTSEL_MASK		\
	(INTEL_ARCH_EVTSEL_MASK |	\
	 INTEL_ARCH_UNIT_MASK   |	\
	 INTEL_ARCH_EDGE_MASK   |	\
	 INTEL_ARCH_INV_MASK    |	\
	 INTEL_ARCH_CNT_MASK)

	return hw_event & CORE_EVNTSEL_MASK;
}

static __initconst u64 amd_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
		[ C(RESULT_MISS) ] = 0x0041, /* Data Cache Misses */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
		[ C(RESULT_MISS) ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts */
		[ C(RESULT_MISS) ] = 0x0167, /* Data Prefetcher :cancelled */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches */
		[ C(RESULT_MISS) ] = 0x0081, /* Instruction cache misses */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
		[ C(RESULT_MISS) ] = 0x037E, /* L2 Cache Misses : IC+DC */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback */
		[ C(RESULT_MISS) ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
		[ C(RESULT_MISS) ] = 0x0046, /* L1 DTLB and L2 DTLB Miss */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches */
		[ C(RESULT_MISS) ] = 0x0085, /* Instr. fetch ITLB misses */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr. */
		[ C(RESULT_MISS) ] = 0x00c3, /* Retired Mispredicted BI */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
};

/*
 * AMD Performance Monitor K7 and later.
 */
static const u64 amd_perfmon_event_map[] =
{
  [PERF_COUNT_HW_CPU_CYCLES]		= 0x0076,
  [PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
  [PERF_COUNT_HW_CACHE_REFERENCES]	= 0x0080,
  [PERF_COUNT_HW_CACHE_MISSES]		= 0x0081,
  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
  [PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
};

static u64 amd_pmu_event_map(int hw_event)
{
	return amd_perfmon_event_map[hw_event];
}

static u64 amd_pmu_raw_event(u64 hw_event)
{
#define K7_EVNTSEL_EVENT_MASK	0x7000000FFULL
#define K7_EVNTSEL_UNIT_MASK	0x00000FF00ULL
#define K7_EVNTSEL_EDGE_MASK	0x000040000ULL
#define K7_EVNTSEL_INV_MASK	0x000800000ULL
#define K7_EVNTSEL_REG_MASK	0x0FF000000ULL

#define K7_EVNTSEL_MASK			\
	(K7_EVNTSEL_EVENT_MASK |	\
	 K7_EVNTSEL_UNIT_MASK  |	\
	 K7_EVNTSEL_EDGE_MASK  |	\
	 K7_EVNTSEL_INV_MASK   |	\
	 K7_EVNTSEL_REG_MASK)

	return hw_event & K7_EVNTSEL_MASK;
}

/*
 * Propagate event elapsed time into the generic event.
 * Can only be executed on the CPU where the event is active.
 * Returns the delta events processed.
 */
static u64
x86_perf_event_update(struct perf_event *event,
			struct hw_perf_event *hwc, int idx)
{
	int shift = 64 - x86_pmu.event_bits;
	u64 prev_raw_count, new_raw_count;
	s64 delta;

	if (idx == X86_PMC_IDX_FIXED_BTS)
		return 0;

	/*
	 * Careful: an NMI might modify the previous event value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic event atomically:
	 */
again:
	prev_raw_count = atomic64_read(&hwc->prev_count);
	rdmsrl(hwc->event_base + idx, new_raw_count);

	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
					new_raw_count) != prev_raw_count)
		goto again;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (event-)time and add that to the generic event.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	atomic64_add(delta, &event->count);
	atomic64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

static atomic_t active_events;
static DEFINE_MUTEX(pmc_reserve_mutex);
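
/*
 * Reserve the counter and eventsel MSRs for perf use. The hardware is
 * shared with the lapic NMI watchdog, so the watchdog is disabled
 * first and re-enabled if reservation fails (and again on release).
 */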
static bool reserve_pmc_hardware(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
	int i;

	if (nmi_watchdog == NMI_LOCAL_APIC)
		disable_lapic_nmi_watchdog();

	for (i = 0; i < x86_pmu.num_events; i++) {
		if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
			goto perfctr_fail;
	}

	for (i = 0; i < x86_pmu.num_events; i++) {
		if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
			goto eventsel_fail;
	}
#endif

	return true;

#ifdef CONFIG_X86_LOCAL_APIC
eventsel_fail:
	for (i--; i >= 0; i--)
		release_evntsel_nmi(x86_pmu.eventsel + i);

	i = x86_pmu.num_events;

perfctr_fail:
	for (i--; i >= 0; i--)
		release_perfctr_nmi(x86_pmu.perfctr + i);

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();

	return false;
#endif
}

static void release_pmc_hardware(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
	int i;

	for (i = 0; i < x86_pmu.num_events; i++) {
		release_perfctr_nmi(x86_pmu.perfctr + i);
		release_evntsel_nmi(x86_pmu.eventsel + i);
	}

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();
#endif
}

static inline bool bts_available(void)
{
	return x86_pmu.enable_bts != NULL;
}

static inline void init_debug_store_on_cpu(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
		     (u32)((u64)(unsigned long)ds),
		     (u32)((u64)(unsigned long)ds >> 32));
}

static inline void fini_debug_store_on_cpu(int cpu)
{
	if (!per_cpu(cpu_hw_events, cpu).ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
}

static void release_bts_hardware(void)
{
	int cpu;

	if (!bts_available())
		return;

	get_online_cpus();

	for_each_online_cpu(cpu)
		fini_debug_store_on_cpu(cpu);

	for_each_possible_cpu(cpu) {
		struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

		if (!ds)
			continue;

		per_cpu(cpu_hw_events, cpu).ds = NULL;

		kfree((void *)(unsigned long)ds->bts_buffer_base);
		kfree(ds);
	}

	put_online_cpus();
}

static int reserve_bts_hardware(void)
{
	int cpu, err = 0;

	if (!bts_available())
		return 0;

	get_online_cpus();

	for_each_possible_cpu(cpu) {
		struct debug_store *ds;
		void *buffer;

		err = -ENOMEM;
		buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL);
		if (unlikely(!buffer))
			break;

		ds = kzalloc(sizeof(*ds), GFP_KERNEL);
		if (unlikely(!ds)) {
			kfree(buffer);
			break;
		}

		ds->bts_buffer_base = (u64)(unsigned long)buffer;
		ds->bts_index = ds->bts_buffer_base;
		ds->bts_absolute_maximum =
			ds->bts_buffer_base + BTS_BUFFER_SIZE;
		ds->bts_interrupt_threshold =
			ds->bts_absolute_maximum - BTS_OVFL_TH;

		per_cpu(cpu_hw_events, cpu).ds = ds;
		err = 0;
	}

	if (err)
		release_bts_hardware();
	else {
		for_each_online_cpu(cpu)
			init_debug_store_on_cpu(cpu);
	}

	put_online_cpus();

	return err;
}

static void hw_perf_event_destroy(struct perf_event *event)
{
	if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
		release_pmc_hardware();
		release_bts_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

static inline int x86_pmu_initialized(void)
{
	return x86_pmu.handle_irq != NULL;
}
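
/*
 * Decode a PERF_TYPE_HW_CACHE config: byte 0 selects the cache, byte 1
 * the operation and byte 2 the result, then look up the model-specific
 * raw event id in hw_cache_event_ids (0 = unsupported, -1 = nonsense
 * on this CPU).
 */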
static inline int
set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
{
	unsigned int cache_type, cache_op, cache_result;
	u64 config, val;

	config = attr->config;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	val = hw_cache_event_ids[cache_type][cache_op][cache_result];

	if (val == 0)
		return -ENOENT;

	if (val == -1)
		return -EINVAL;

	hwc->config |= val;

	return 0;
}
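
/*
 * Branch Tracing Store: set the debugctl bits that enable branch
 * recording into the DS area, masking out ring 0 and/or ring 3
 * according to the event's OS/USR configuration.
 */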
static void intel_pmu_enable_bts(u64 config)
{
	unsigned long debugctlmsr;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr |= X86_DEBUGCTL_TR;
	debugctlmsr |= X86_DEBUGCTL_BTS;
	debugctlmsr |= X86_DEBUGCTL_BTINT;

	if (!(config & ARCH_PERFMON_EVENTSEL_OS))
		debugctlmsr |= X86_DEBUGCTL_BTS_OFF_OS;

	if (!(config & ARCH_PERFMON_EVENTSEL_USR))
		debugctlmsr |= X86_DEBUGCTL_BTS_OFF_USR;

	update_debugctlmsr(debugctlmsr);
}

static void intel_pmu_disable_bts(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long debugctlmsr;

	if (!cpuc->ds)
		return;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr &=
		~(X86_DEBUGCTL_TR | X86_DEBUGCTL_BTS | X86_DEBUGCTL_BTINT |
		  X86_DEBUGCTL_BTS_OFF_OS | X86_DEBUGCTL_BTS_OFF_USR);

	update_debugctlmsr(debugctlmsr);
}

/*
 * Setup the hardware configuration for a given attr_type
 */
static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	u64 config;
	int err;

	if (!x86_pmu_initialized())
		return -ENODEV;

	err = 0;
	if (!atomic_inc_not_zero(&active_events)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&active_events) == 0) {
			if (!reserve_pmc_hardware())
				err = -EBUSY;
			else
				err = reserve_bts_hardware();
		}
		if (!err)
			atomic_inc(&active_events);
		mutex_unlock(&pmc_reserve_mutex);
	}
	if (err)
		return err;

	event->destroy = hw_perf_event_destroy;

	/*
	 * Generate PMC IRQs:
	 * (keep 'enabled' bit clear for now)
	 */
	hwc->config = ARCH_PERFMON_EVENTSEL_INT;

	hwc->idx = -1;
	hwc->last_cpu = -1;
	hwc->last_tag = ~0ULL;

	/*
	 * Count user and OS events unless requested not to.
	 */
	if (!attr->exclude_user)
		hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
	if (!attr->exclude_kernel)
		hwc->config |= ARCH_PERFMON_EVENTSEL_OS;

	if (!hwc->sample_period) {
		hwc->sample_period = x86_pmu.max_period;
		hwc->last_period = hwc->sample_period;
		atomic64_set(&hwc->period_left, hwc->sample_period);
	} else {
		/*
		 * If we have a PMU initialized but no APIC
		 * interrupts, we cannot sample hardware
		 * events (user-space has to fall back and
		 * sample via a hrtimer based software event):
		 */
		if (!x86_pmu.apic)
			return -EOPNOTSUPP;
	}

	/*
	 * Raw hw_event type provides the config in the hw_event structure
	 */
	if (attr->type == PERF_TYPE_RAW) {
		hwc->config |= x86_pmu.raw_event(attr->config);
		return 0;
	}

	if (attr->type == PERF_TYPE_HW_CACHE)
		return set_ext_hw_attr(hwc, attr);

	if (attr->config >= x86_pmu.max_events)
		return -EINVAL;

	/*
	 * The generic map:
	 */
	config = x86_pmu.event_map(attr->config);

	if (config == 0)
		return -ENOENT;

	if (config == -1LL)
		return -EINVAL;

	/*
	 * Branch tracing:
	 */
	if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
	    (hwc->sample_period == 1)) {
		/* BTS is not supported by this architecture. */
		if (!bts_available())
			return -EOPNOTSUPP;

		/* BTS is currently only allowed for user-mode. */
		if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
			return -EOPNOTSUPP;
	}

	hwc->config |= config;

	return 0;
}

static void p6_pmu_disable_all(void)
{
	u64 val;

	/* p6 only has one enable register */
	rdmsrl(MSR_P6_EVNTSEL0, val);
	val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
	wrmsrl(MSR_P6_EVNTSEL0, val);
}

static void intel_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
		intel_pmu_disable_bts();
}

static void x86_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_events; idx++) {
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;
		rdmsrl(x86_pmu.eventsel + idx, val);
		if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
			continue;
		val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(x86_pmu.eventsel + idx, val);
	}
}

void hw_perf_disable(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!x86_pmu_initialized())
		return;

	if (!cpuc->enabled)
		return;

	cpuc->n_added = 0;
	cpuc->enabled = 0;
	barrier();

	x86_pmu.disable_all();
}

static void p6_pmu_enable_all(void)
{
	unsigned long val;

	/* p6 only has one enable register */
	rdmsrl(MSR_P6_EVNTSEL0, val);
	val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
	wrmsrl(MSR_P6_EVNTSEL0, val);
}

static void intel_pmu_enable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);

	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
		struct perf_event *event =
			cpuc->events[X86_PMC_IDX_FIXED_BTS];

		if (WARN_ON_ONCE(!event))
			return;

		intel_pmu_enable_bts(event->hw.config);
	}
}

static void x86_pmu_enable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_events; idx++) {
		struct perf_event *event = cpuc->events[idx];
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		val = event->hw.config;
		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(x86_pmu.eventsel + idx, val);
	}
}

static const struct pmu pmu;

static inline int is_x86_event(struct perf_event *event)
{
	return event->pmu == &pmu;
}
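
/*
 * Assign the collected events to hardware counters while honouring
 * the per-event constraint masks: first try to keep every event on
 * the counter it used last time (fastpath); failing that, redo the
 * whole assignment, placing the most constrained (lowest weight)
 * events first.
 */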
static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
{
	struct event_constraint *c, *constraints[X86_PMC_IDX_MAX];
	unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	int i, j, w, wmax, num = 0;
	struct hw_perf_event *hwc;

	bitmap_zero(used_mask, X86_PMC_IDX_MAX);

	for (i = 0; i < n; i++) {
		constraints[i] =
		  x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
	}

	/*
	 * fastpath, try to reuse previous register
	 */
	for (i = 0; i < n; i++) {
		hwc = &cpuc->event_list[i]->hw;
		c = constraints[i];

		/* never assigned */
		if (hwc->idx == -1)
			break;

		/* constraint still honored */
		if (!test_bit(hwc->idx, c->idxmsk))
			break;

		/* not already used */
		if (test_bit(hwc->idx, used_mask))
			break;

		set_bit(hwc->idx, used_mask);
		if (assign)
			assign[i] = hwc->idx;
	}
	if (i == n)
		goto done;

	/*
	 * begin slow path
	 */
	bitmap_zero(used_mask, X86_PMC_IDX_MAX);

	/*
	 * weight = number of possible counters
	 *
	 * 1    = most constrained, only works on one counter
	 * wmax = least constrained, works on any counter
	 *
	 * assign events to counters starting with most
	 * constrained events.
	 */
	wmax = x86_pmu.num_events;

	/*
	 * when fixed event counters are present,
	 * wmax is incremented by 1 to account
	 * for one more choice
	 */
	if (x86_pmu.num_events_fixed)
		wmax++;

	for (w = 1, num = n; num && w <= wmax; w++) {
		/* for each event */
		for (i = 0; num && i < n; i++) {
			c = constraints[i];
			hwc = &cpuc->event_list[i]->hw;

			if (c->weight != w)
				continue;

			for_each_bit(j, c->idxmsk, X86_PMC_IDX_MAX) {
				if (!test_bit(j, used_mask))
					break;
			}

			if (j == X86_PMC_IDX_MAX)
				break;

			set_bit(j, used_mask);

			if (assign)
				assign[i] = j;
			num--;
		}
	}
done:
	/*
	 * scheduling failed or is just a simulation,
	 * free resources if necessary
	 */
	if (!assign || num) {
		for (i = 0; i < n; i++) {
			if (x86_pmu.put_event_constraints)
				x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
		}
	}
	return num ? -ENOSPC : 0;
}

/*
 * dogrp: true if we must collect sibling events (group)
 * returns total number of events and error code
 */
static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
{
	struct perf_event *event;
	int n, max_count;

	max_count = x86_pmu.num_events + x86_pmu.num_events_fixed;

	/* current number of events already accepted */
	n = cpuc->n_events;

	if (is_x86_event(leader)) {
		if (n >= max_count)
			return -ENOSPC;
		cpuc->event_list[n] = leader;
		n++;
	}
	if (!dogrp)
		return n;

	list_for_each_entry(event, &leader->sibling_list, group_entry) {
		if (!is_x86_event(event) ||
		    event->state <= PERF_EVENT_STATE_OFF)
			continue;

		if (n >= max_count)
			return -ENOSPC;

		cpuc->event_list[n] = event;
		n++;
	}
	return n;
}
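
/*
 * Program the counter index chosen by the scheduler into the event
 * and derive the config/counter MSR base addresses: BTS uses no MSR
 * pair, fixed counters use the fixed-ctr control MSR, and generic
 * counters use this PMU's eventsel/perfctr MSRs.
 */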
static inline void x86_assign_hw_event(struct perf_event *event,
				struct cpu_hw_events *cpuc, int i)
{
	struct hw_perf_event *hwc = &event->hw;

	hwc->idx = cpuc->assign[i];
	hwc->last_cpu = smp_processor_id();
	hwc->last_tag = ++cpuc->tags[i];

	if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
		hwc->config_base = 0;
		hwc->event_base	= 0;
	} else if (hwc->idx >= X86_PMC_IDX_FIXED) {
		hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
		/*
		 * We set it so that event_base + idx in wrmsr/rdmsr maps to
		 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
		 */
		hwc->event_base =
			MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
	} else {
		hwc->config_base = x86_pmu.eventsel;
		hwc->event_base  = x86_pmu.perfctr;
	}
}

static inline int match_prev_assignment(struct hw_perf_event *hwc,
					struct cpu_hw_events *cpuc,
					int i)
{
	return hwc->idx == cpuc->assign[i] &&
		hwc->last_cpu == smp_processor_id() &&
		hwc->last_tag == cpuc->tags[i];
}

static void x86_pmu_stop(struct perf_event *event);
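
/*
 * Re-enable the PMU after hw_perf_disable(). If events were added in
 * between, first stop the events that lost their counter, then
 * (re)program every moved event on its newly assigned counter before
 * enabling the PMU globally.
 */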
  1300. void hw_perf_enable(void)
  1301. {
  1302. struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
  1303. struct perf_event *event;
  1304. struct hw_perf_event *hwc;
  1305. int i;
  1306. if (!x86_pmu_initialized())
  1307. return;
  1308. if (cpuc->enabled)
  1309. return;
  1310. if (cpuc->n_added) {
  1311. /*
  1312. * apply assignment obtained either from
  1313. * hw_perf_group_sched_in() or x86_pmu_enable()
  1314. *
  1315. * step1: save events moving to new counters
  1316. * step2: reprogram moved events into new counters
  1317. */
  1318. for (i = 0; i < cpuc->n_events; i++) {
  1319. event = cpuc->event_list[i];
  1320. hwc = &event->hw;
  1321. /*
  1322. * we can avoid reprogramming counter if:
  1323. * - assigned same counter as last time
  1324. * - running on same CPU as last time
  1325. * - no other event has used the counter since
  1326. */
  1327. if (hwc->idx == -1 ||
  1328. match_prev_assignment(hwc, cpuc, i))
  1329. continue;
  1330. x86_pmu_stop(event);
  1331. hwc->idx = -1;
  1332. }
  1333. for (i = 0; i < cpuc->n_events; i++) {
  1334. event = cpuc->event_list[i];
  1335. hwc = &event->hw;
  1336. if (hwc->idx == -1) {
  1337. x86_assign_hw_event(event, cpuc, i);
  1338. x86_perf_event_set_period(event, hwc, hwc->idx);
  1339. }
  1340. /*
  1341. * need to mark as active because x86_pmu_disable()
  1342. * clear active_mask and events[] yet it preserves
  1343. * idx
  1344. */
  1345. set_bit(hwc->idx, cpuc->active_mask);
  1346. cpuc->events[hwc->idx] = event;
  1347. x86_pmu.enable(hwc, hwc->idx);
  1348. perf_event_update_userpage(event);
  1349. }
  1350. cpuc->n_added = 0;
  1351. perf_events_lapic_init();
  1352. }
  1353. cpuc->enabled = 1;
  1354. barrier();
  1355. x86_pmu.enable_all();
  1356. }
  1357. static inline u64 intel_pmu_get_status(void)
  1358. {
  1359. u64 status;
  1360. rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
  1361. return status;
  1362. }
  1363. static inline void intel_pmu_ack_status(u64 ack)
  1364. {
  1365. wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
  1366. }
  1367. static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
  1368. {
  1369. (void)checking_wrmsrl(hwc->config_base + idx,
  1370. hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
  1371. }
  1372. static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx)
  1373. {
  1374. (void)checking_wrmsrl(hwc->config_base + idx, hwc->config);
  1375. }

static inline void
intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx)
{
	int idx = __idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, mask;

	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	(void)checking_wrmsrl(hwc->config_base, ctrl_val);
}

static inline void
p6_pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	u64 val = P6_NOP_EVENT;

	if (cpuc->enabled)
		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;

	(void)checking_wrmsrl(hwc->config_base + idx, val);
}

static inline void
intel_pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
	if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
		intel_pmu_disable_bts();
		return;
	}

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_disable_fixed(hwc, idx);
		return;
	}

	x86_pmu_disable_event(hwc, idx);
}

static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);

/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the event disabled in hw:
 */
static int
x86_perf_event_set_period(struct perf_event *event,
			  struct hw_perf_event *hwc, int idx)
{
	s64 left = atomic64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int err, ret = 0;

	if (idx == X86_PMC_IDX_FIXED_BTS)
		return 0;

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}
	/*
	 * Quirk: certain CPUs don't like it if just 1 hw_event is left:
	 */
	if (unlikely(left < 2))
		left = 2;

	if (left > x86_pmu.max_period)
		left = x86_pmu.max_period;

	per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;

	/*
	 * The hw event starts counting from this event offset,
	 * mark it to be able to extract future deltas:
	 */
	atomic64_set(&hwc->prev_count, (u64)-left);

	err = checking_wrmsrl(hwc->event_base + idx,
			      (u64)(-left) & x86_pmu.event_mask);

	perf_event_update_userpage(event);

	return ret;
}
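
/*
 * Worked example (illustrative numbers, not from the original source):
 * with sample_period = 100000 on a 48-bit counter, left = 100000 and
 * the counter is programmed to (u64)(-100000) & ((1ULL << 48) - 1) =
 * 0xfffffffe7960, so it wraps, and raises the PMI, after exactly
 * 100000 increments.
 */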

static inline void
intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
{
	int idx = __idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, bits, mask;
	int err;

	/*
	 * Enable IRQ generation (0x8),
	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
	 * if requested:
	 */
	bits = 0x8ULL;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
		bits |= 0x2;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
		bits |= 0x1;

	/*
	 * ANY bit is supported in v3 and up
	 */
	if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
		bits |= 0x4;

	bits <<= (idx * 4);
	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	ctrl_val |= bits;
	err = checking_wrmsrl(hwc->config_base, ctrl_val);
}

static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	u64 val;

	val = hwc->config;
	if (cpuc->enabled)
		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;

	(void)checking_wrmsrl(hwc->config_base + idx, val);
}

static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
	if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
		if (!__get_cpu_var(cpu_hw_events).enabled)
			return;

		intel_pmu_enable_bts(hwc->config);
		return;
	}

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_enable_fixed(hwc, idx);
		return;
	}

	__x86_pmu_enable_event(hwc, idx);
}

static void x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (cpuc->enabled)
		__x86_pmu_enable_event(hwc, idx);
}

/*
 * activate a single event
 *
 * The event is added to the group of enabled events
 * but only if it can be scheduled with existing events.
 *
 * Called with PMU disabled. On success (return value 0), the caller
 * is guaranteed to later call perf_enable() and hw_perf_enable().
 */
static int x86_pmu_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc;
	int assign[X86_PMC_IDX_MAX];
	int n, n0, ret;

	hwc = &event->hw;

	n0 = cpuc->n_events;
	n = collect_events(cpuc, event, false);
	if (n < 0)
		return n;

	ret = x86_schedule_events(cpuc, n, assign);
	if (ret)
		return ret;
	/*
	 * copy the new assignment now that we know it is possible;
	 * it will be used by hw_perf_enable()
	 */
	memcpy(cpuc->assign, assign, n*sizeof(int));

	cpuc->n_events = n;
	cpuc->n_added = n - n0;

	return 0;
}
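
/*
 * Note: x86_pmu_enable() only records the new assignment; no counter
 * MSR is touched here. The actual (re)programming is done lazily by
 * hw_perf_enable() above, once the full set of events for this
 * context is known.
 */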

static int x86_pmu_start(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx == -1)
		return -EAGAIN;

	x86_perf_event_set_period(event, hwc, hwc->idx);
	x86_pmu.enable(hwc, hwc->idx);

	return 0;
}

static void x86_pmu_unthrottle(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX ||
				cpuc->events[hwc->idx] != event))
		return;

	x86_pmu.enable(hwc, hwc->idx);
}

void perf_event_print_debug(void)
{
	u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
	struct cpu_hw_events *cpuc;
	unsigned long flags;
	int cpu, idx;

	if (!x86_pmu.num_events)
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();
	cpuc = &per_cpu(cpu_hw_events, cpu);

	if (x86_pmu.version >= 2) {
		rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
		rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
		rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);

		pr_info("\n");
		pr_info("CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
		pr_info("CPU#%d: status:     %016llx\n", cpu, status);
		pr_info("CPU#%d: overflow:   %016llx\n", cpu, overflow);
		pr_info("CPU#%d: fixed:      %016llx\n", cpu, fixed);
	}
	pr_info("CPU#%d: active:     %016llx\n", cpu, *(u64 *)cpuc->active_mask);

	for (idx = 0; idx < x86_pmu.num_events; idx++) {
		rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
		rdmsrl(x86_pmu.perfctr + idx, pmc_count);

		prev_left = per_cpu(pmc_prev_left[idx], cpu);

		pr_info("CPU#%d:   gen-PMC%d ctrl:  %016llx\n",
			cpu, idx, pmc_ctrl);
		pr_info("CPU#%d:   gen-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
		pr_info("CPU#%d:   gen-PMC%d left:  %016llx\n",
			cpu, idx, prev_left);
	}
	for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);

		pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
	}
	local_irq_restore(flags);
}

static void intel_pmu_drain_bts_buffer(struct cpu_hw_events *cpuc)
{
	struct debug_store *ds = cpuc->ds;
	struct bts_record {
		u64	from;
		u64	to;
		u64	flags;
	};
	struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
	struct bts_record *at, *top;
	struct perf_output_handle handle;
	struct perf_event_header header;
	struct perf_sample_data data;
	struct pt_regs regs;

	if (!event)
		return;

	if (!ds)
		return;

	at  = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
	top = (struct bts_record *)(unsigned long)ds->bts_index;

	if (top <= at)
		return;

	ds->bts_index = ds->bts_buffer_base;

	data.period	= event->hw.last_period;
	data.addr	= 0;
	data.raw	= NULL;
	regs.ip		= 0;

	/*
	 * Prepare a generic sample, i.e. fill in the invariant fields.
	 * We will overwrite the from and to address before we output
	 * the sample.
	 */
	perf_prepare_sample(&header, &data, event, &regs);

	if (perf_output_begin(&handle, event,
			      header.size * (top - at), 1, 1))
		return;

	for (; at < top; at++) {
		data.ip		= at->from;
		data.addr	= at->to;

		perf_output_sample(&handle, &header, &data, event);
	}

	perf_output_end(&handle);

	/* There's new data available. */
	event->hw.interrupts++;
	event->pending_kill = POLL_IN;
}
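
/*
 * Note: each hardware BTS record is thus expanded into an ordinary
 * perf sample with ip = branch-from and addr = branch-to; only the
 * invariant sample header is prepared once, outside the loop.
 */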

static void x86_pmu_stop(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	/*
	 * Must be done before we disable, otherwise the nmi handler
	 * could re-enable it:
	 */
	clear_bit(idx, cpuc->active_mask);
	x86_pmu.disable(hwc, idx);

	/*
	 * Drain the remaining delta count out of an event
	 * that we are disabling:
	 */
	x86_perf_event_update(event, hwc, idx);

	/* Drain the remaining BTS records. */
	if (unlikely(idx == X86_PMC_IDX_FIXED_BTS))
		intel_pmu_drain_bts_buffer(cpuc);

	cpuc->events[idx] = NULL;
}

static void x86_pmu_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int i;

	x86_pmu_stop(event);

	for (i = 0; i < cpuc->n_events; i++) {
		if (event == cpuc->event_list[i]) {

			if (x86_pmu.put_event_constraints)
				x86_pmu.put_event_constraints(cpuc, event);

			while (++i < cpuc->n_events)
				cpuc->event_list[i-1] = cpuc->event_list[i];

			--cpuc->n_events;
			break;
		}
	}
	perf_event_update_userpage(event);
}

/*
 * Save and restart an expired event. Called by NMI contexts,
 * so it has to be careful about preempting normal event ops:
 */
static int intel_pmu_save_and_restart(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	int ret;

	x86_perf_event_update(event, hwc, idx);
	ret = x86_perf_event_set_period(event, hwc, idx);

	if (event->state == PERF_EVENT_STATE_ACTIVE)
		intel_pmu_enable_event(hwc, idx);

	return ret;
}

static void intel_pmu_reset(void)
{
	struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds;
	unsigned long flags;
	int idx;

	if (!x86_pmu.num_events)
		return;

	local_irq_save(flags);

	printk("clearing PMU state on CPU#%d\n", smp_processor_id());

	for (idx = 0; idx < x86_pmu.num_events; idx++) {
		checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
		checking_wrmsrl(x86_pmu.perfctr + idx, 0ull);
	}
	for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
		checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
	}
	if (ds)
		ds->bts_index = ds->bts_buffer_base;

	local_irq_restore(flags);
}

/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static int intel_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	int bit, loops;
	u64 ack, status;

	data.addr = 0;
	data.raw = NULL;

	cpuc = &__get_cpu_var(cpu_hw_events);

	perf_disable();
	intel_pmu_drain_bts_buffer(cpuc);
	status = intel_pmu_get_status();
	if (!status) {
		perf_enable();
		return 0;
	}

	loops = 0;
again:
	if (++loops > 100) {
		WARN_ONCE(1, "perfevents: irq loop stuck!\n");
		perf_event_print_debug();
		intel_pmu_reset();
		perf_enable();
		return 1;
	}

	inc_irq_stat(apic_perf_irqs);
	ack = status;
	for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
		struct perf_event *event = cpuc->events[bit];

		clear_bit(bit, (unsigned long *) &status);
		if (!test_bit(bit, cpuc->active_mask))
			continue;

		if (!intel_pmu_save_and_restart(event))
			continue;

		data.period = event->hw.last_period;

		if (perf_event_overflow(event, 1, &data, regs))
			intel_pmu_disable_event(&event->hw, bit);
	}

	intel_pmu_ack_status(ack);

	/*
	 * Repeat if there is more work to be done:
	 */
	status = intel_pmu_get_status();
	if (status)
		goto again;

	perf_enable();

	return 1;
}

static int x86_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct perf_event *event;
	struct hw_perf_event *hwc;
	int idx, handled = 0;
	u64 val;

	data.addr = 0;
	data.raw = NULL;

	cpuc = &__get_cpu_var(cpu_hw_events);

	for (idx = 0; idx < x86_pmu.num_events; idx++) {
		if (!test_bit(idx, cpuc->active_mask))
			continue;

		event = cpuc->events[idx];
		hwc = &event->hw;

		val = x86_perf_event_update(event, hwc, idx);
		/*
		 * Counters are programmed with a negative value, so as
		 * long as the sign bit (bit event_bits-1) is still set
		 * the counter has not overflowed yet:
		 */
		if (val & (1ULL << (x86_pmu.event_bits - 1)))
			continue;

		/*
		 * event overflow
		 */
		handled = 1;
		data.period = event->hw.last_period;

		if (!x86_perf_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, 1, &data, regs))
			x86_pmu.disable(hwc, idx);
	}

	if (handled)
		inc_irq_stat(apic_perf_irqs);

	return handled;
}

void smp_perf_pending_interrupt(struct pt_regs *regs)
{
	irq_enter();
	ack_APIC_irq();
	inc_irq_stat(apic_pending_irqs);
	perf_event_do_pending();
	irq_exit();
}

void set_perf_event_pending(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
	if (!x86_pmu.apic || !x86_pmu_initialized())
		return;

	apic->send_IPI_self(LOCAL_PENDING_VECTOR);
#endif
}

void perf_events_lapic_init(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
	if (!x86_pmu.apic || !x86_pmu_initialized())
		return;

	/*
	 * Always use NMI for PMU
	 */
	apic_write(APIC_LVTPC, APIC_DM_NMI);
#endif
}

static int __kprobes
perf_event_nmi_handler(struct notifier_block *self,
			 unsigned long cmd, void *__args)
{
	struct die_args *args = __args;
	struct pt_regs *regs;

	if (!atomic_read(&active_events))
		return NOTIFY_DONE;

	switch (cmd) {
	case DIE_NMI:
	case DIE_NMI_IPI:
		break;

	default:
		return NOTIFY_DONE;
	}

	regs = args->regs;

#ifdef CONFIG_X86_LOCAL_APIC
	apic_write(APIC_LVTPC, APIC_DM_NMI);
#endif
	/*
	 * Can't rely on the handled return value to say it was our NMI, two
	 * events could trigger 'simultaneously' raising two back-to-back NMIs.
	 *
	 * If the first NMI handles both, the latter will be empty and daze
	 * the CPU.
	 */
	x86_pmu.handle_irq(regs);

	return NOTIFY_STOP;
}

static struct event_constraint unconstrained;

static struct event_constraint bts_constraint =
	EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);

static struct event_constraint *
intel_special_constraints(struct perf_event *event)
{
	unsigned int hw_event;

	hw_event = event->hw.config & INTEL_ARCH_EVENT_MASK;

	if (unlikely((hw_event ==
		      x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) &&
		     (event->hw.sample_period == 1))) {

		return &bts_constraint;
	}
	return NULL;
}

static struct event_constraint *
intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct event_constraint *c;

	c = intel_special_constraints(event);
	if (c)
		return c;

	if (x86_pmu.event_constraints) {
		for_each_event_constraint(c, x86_pmu.event_constraints) {
			if ((event->hw.config & c->cmask) == c->code)
				return c;
		}
	}

	return &unconstrained;
}

static struct event_constraint *
amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	return &unconstrained;
}
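
/*
 * Note: at this point AMD CPUs have no cross-counter scheduling
 * restrictions, so every event simply gets the "unconstrained" mask,
 * i.e. it may be placed on any of the generic counters.
 */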

static int x86_event_sched_in(struct perf_event *event,
			  struct perf_cpu_context *cpuctx, int cpu)
{
	int ret = 0;

	event->state = PERF_EVENT_STATE_ACTIVE;
	event->oncpu = cpu;
	event->tstamp_running += event->ctx->time - event->tstamp_stopped;

	if (!is_x86_event(event))
		ret = event->pmu->enable(event);

	if (!ret && !is_software_event(event))
		cpuctx->active_oncpu++;

	if (!ret && event->attr.exclusive)
		cpuctx->exclusive = 1;

	return ret;
}

static void x86_event_sched_out(struct perf_event *event,
			    struct perf_cpu_context *cpuctx, int cpu)
{
	event->state = PERF_EVENT_STATE_INACTIVE;
	event->oncpu = -1;

	if (!is_x86_event(event))
		event->pmu->disable(event);

	event->tstamp_running -= event->ctx->time - event->tstamp_stopped;

	if (!is_software_event(event))
		cpuctx->active_oncpu--;

	if (event->attr.exclusive || !cpuctx->active_oncpu)
		cpuctx->exclusive = 0;
}

/*
 * Called to enable a whole group of events.
 * Returns 1 if the group was enabled, or -EAGAIN if it could not be.
 * Assumes the caller has disabled interrupts and has
 * frozen the PMU with hw_perf_save_disable.
 *
 * If successful (return value 1), the caller is then guaranteed to
 * call perf_enable() and hw_perf_enable().
 */
int hw_perf_group_sched_in(struct perf_event *leader,
	       struct perf_cpu_context *cpuctx,
	       struct perf_event_context *ctx, int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	struct perf_event *sub;
	int assign[X86_PMC_IDX_MAX];
	int n0, n1, ret;

	/* n0 = total number of events */
	n0 = collect_events(cpuc, leader, true);
	if (n0 < 0)
		return n0;

	ret = x86_schedule_events(cpuc, n0, assign);
	if (ret)
		return ret;

	ret = x86_event_sched_in(leader, cpuctx, cpu);
	if (ret)
		return ret;

	n1 = 1;
	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
		if (sub->state > PERF_EVENT_STATE_OFF) {
			ret = x86_event_sched_in(sub, cpuctx, cpu);
			if (ret)
				goto undo;
			++n1;
		}
	}
	/*
	 * copy the new assignment now that we know it is possible;
	 * it will be used by hw_perf_enable()
	 */
	memcpy(cpuc->assign, assign, n0*sizeof(int));

	cpuc->n_events  = n0;
	cpuc->n_added   = n1;
	ctx->nr_active += n1;

	/*
	 * 1 means successful and events are active
	 * This is not quite true because we defer
	 * actual activation until hw_perf_enable() but
	 * this way we ensure the caller won't try to enable
	 * individual events
	 */
	return 1;
undo:
	x86_event_sched_out(leader, cpuctx, cpu);
	n0  = 1;
	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
		if (sub->state == PERF_EVENT_STATE_ACTIVE) {
			x86_event_sched_out(sub, cpuctx, cpu);
			if (++n0 == n1)
				break;
		}
	}
	return ret;
}

static __read_mostly struct notifier_block perf_event_nmi_notifier = {
	.notifier_call		= perf_event_nmi_handler,
	.next			= NULL,
	.priority		= 1
};

static __initconst struct x86_pmu p6_pmu = {
	.name			= "p6",
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= p6_pmu_disable_all,
	.enable_all		= p6_pmu_enable_all,
	.enable			= p6_pmu_enable_event,
	.disable		= p6_pmu_disable_event,
	.eventsel		= MSR_P6_EVNTSEL0,
	.perfctr		= MSR_P6_PERFCTR0,
	.event_map		= p6_pmu_event_map,
	.raw_event		= p6_pmu_raw_event,
	.max_events		= ARRAY_SIZE(p6_perfmon_event_map),
	.apic			= 1,
	.max_period		= (1ULL << 31) - 1,
	.version		= 0,
	.num_events		= 2,
	/*
	 * Events have 40 bits implemented. However they are designed such
	 * that bits [32-39] are sign extensions of bit 31. As such the
	 * effective width of an event for P6-like PMUs is 32 bits only.
	 *
	 * See IA-32 Intel Architecture Software Developer's Manual, Vol 3B.
	 */
	.event_bits		= 32,
	.event_mask		= (1ULL << 32) - 1,
	.get_event_constraints	= intel_get_event_constraints,
	.event_constraints	= intel_p6_event_constraints
};

static __initconst struct x86_pmu core_pmu = {
	.name			= "core",
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= x86_pmu_disable_all,
	.enable_all		= x86_pmu_enable_all,
	.enable			= x86_pmu_enable_event,
	.disable		= x86_pmu_disable_event,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.raw_event		= intel_pmu_raw_event,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
	.apic			= 1,
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic event period:
	 */
	.max_period		= (1ULL << 31) - 1,
	.get_event_constraints	= intel_get_event_constraints,
	.event_constraints	= intel_core_event_constraints,
};

static __initconst struct x86_pmu intel_pmu = {
	.name			= "Intel",
	.handle_irq		= intel_pmu_handle_irq,
	.disable_all		= intel_pmu_disable_all,
	.enable_all		= intel_pmu_enable_all,
	.enable			= intel_pmu_enable_event,
	.disable		= intel_pmu_disable_event,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.raw_event		= intel_pmu_raw_event,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
	.apic			= 1,
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic event period:
	 */
	.max_period		= (1ULL << 31) - 1,
	.enable_bts		= intel_pmu_enable_bts,
	.disable_bts		= intel_pmu_disable_bts,
	.get_event_constraints	= intel_get_event_constraints
};
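
/*
 * Note: intel_pmu deliberately leaves .num_events, .event_bits,
 * .event_mask and .event_constraints unset; intel_pmu_init() fills
 * them in from CPUID leaf 0xa and from the per-model table below.
 */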

static __initconst struct x86_pmu amd_pmu = {
	.name			= "AMD",
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= x86_pmu_disable_all,
	.enable_all		= x86_pmu_enable_all,
	.enable			= x86_pmu_enable_event,
	.disable		= x86_pmu_disable_event,
	.eventsel		= MSR_K7_EVNTSEL0,
	.perfctr		= MSR_K7_PERFCTR0,
	.event_map		= amd_pmu_event_map,
	.raw_event		= amd_pmu_raw_event,
	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
	.num_events		= 4,
	.event_bits		= 48,
	.event_mask		= (1ULL << 48) - 1,
	.apic			= 1,
	/* use highest bit to detect overflow */
	.max_period		= (1ULL << 47) - 1,
	.get_event_constraints	= amd_get_event_constraints
};

static __init int p6_pmu_init(void)
{
	switch (boot_cpu_data.x86_model) {
	case 1:
	case 3:  /* Pentium Pro */
	case 5:
	case 6:  /* Pentium II */
	case 7:
	case 8:
	case 11: /* Pentium III */
	case 9:
	case 13:
		/* Pentium M */
		break;
	default:
		pr_cont("unsupported p6 CPU model %d ",
			boot_cpu_data.x86_model);
		return -ENODEV;
	}

	x86_pmu = p6_pmu;

	return 0;
}

static __init int intel_pmu_init(void)
{
	union cpuid10_edx edx;
	union cpuid10_eax eax;
	unsigned int unused;
	unsigned int ebx;
	int version;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
		/* check for P6 processor family */
		if (boot_cpu_data.x86 == 6) {
			return p6_pmu_init();
		} else {
			return -ENODEV;
		}
	}

	/*
	 * Check whether the Architectural PerfMon supports
	 * the Branch Misses Retired hw_event or not.
	 */
	cpuid(10, &eax.full, &ebx, &unused, &edx.full);
	if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
		return -ENODEV;

	version = eax.split.version_id;
	if (version < 2)
		x86_pmu = core_pmu;
	else
		x86_pmu = intel_pmu;

	x86_pmu.version			= version;
	x86_pmu.num_events		= eax.split.num_events;
	x86_pmu.event_bits		= eax.split.bit_width;
	x86_pmu.event_mask		= (1ULL << eax.split.bit_width) - 1;

	/*
	 * Quirk: v2 perfmon does not report fixed-purpose events, so
	 * assume at least 3 events:
	 */
	if (version > 1)
		x86_pmu.num_events_fixed = max((int)edx.split.num_events_fixed, 3);

	/*
	 * Install the hw-cache-events table:
	 */
	switch (boot_cpu_data.x86_model) {
	case 14: /* 65 nm core solo/duo, "Yonah" */
		pr_cont("Core events, ");
		break;

	case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
	case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
	case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
	case 29: /* six-core 45 nm xeon "Dunnington" */
		memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		x86_pmu.event_constraints = intel_core2_event_constraints;
		pr_cont("Core2 events, ");
		break;

	case 26: /* 45 nm nehalem, "Bloomfield" */
	case 30: /* 45 nm nehalem, "Lynnfield" */
		memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		x86_pmu.event_constraints = intel_nehalem_event_constraints;
		pr_cont("Nehalem/Corei7 events, ");
		break;

	case 28: /* Atom */
		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		x86_pmu.event_constraints = intel_gen_event_constraints;
		pr_cont("Atom events, ");
		break;

	case 37: /* 32 nm nehalem, "Clarkdale" */
	case 44: /* 32 nm nehalem, "Gulftown" */
		memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		x86_pmu.event_constraints = intel_westmere_event_constraints;
		pr_cont("Westmere events, ");
		break;

	default:
		/*
		 * default constraints for v2 and up
		 */
		x86_pmu.event_constraints = intel_gen_event_constraints;
		pr_cont("generic architected perfmon, ");
	}
	return 0;
}

static __init int amd_pmu_init(void)
{
	/* Performance-monitoring supported from K7 and later: */
	if (boot_cpu_data.x86 < 6)
		return -ENODEV;

	x86_pmu = amd_pmu;

	/* Events are common for all AMDs */
	memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
	       sizeof(hw_cache_event_ids));

	return 0;
}

static void __init pmu_check_apic(void)
{
	if (cpu_has_apic)
		return;

	x86_pmu.apic = 0;
	pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
	pr_info("no hardware sampling interrupt available.\n");
}

void __init init_hw_perf_events(void)
{
	int err;

	pr_info("Performance Events: ");

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		err = intel_pmu_init();
		break;
	case X86_VENDOR_AMD:
		err = amd_pmu_init();
		break;
	default:
		return;
	}
	if (err != 0) {
		pr_cont("no PMU driver, software events only.\n");
		return;
	}

	pmu_check_apic();

	pr_cont("%s PMU driver.\n", x86_pmu.name);

	if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) {
		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
		     x86_pmu.num_events, X86_PMC_MAX_GENERIC);
		x86_pmu.num_events = X86_PMC_MAX_GENERIC;
	}
	perf_event_mask = (1 << x86_pmu.num_events) - 1;
	perf_max_events = x86_pmu.num_events;

	if (x86_pmu.num_events_fixed > X86_PMC_MAX_FIXED) {
		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
		     x86_pmu.num_events_fixed, X86_PMC_MAX_FIXED);
		x86_pmu.num_events_fixed = X86_PMC_MAX_FIXED;
	}

	perf_event_mask |=
		((1LL << x86_pmu.num_events_fixed)-1) << X86_PMC_IDX_FIXED;
	x86_pmu.intel_ctrl = perf_event_mask;

	perf_events_lapic_init();
	register_die_notifier(&perf_event_nmi_notifier);

	unconstrained = (struct event_constraint)
		__EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_events) - 1,
				   0, x86_pmu.num_events);

	pr_info("... version:                %d\n",     x86_pmu.version);
	pr_info("... bit width:              %d\n",     x86_pmu.event_bits);
	pr_info("... generic registers:      %d\n",     x86_pmu.num_events);
	pr_info("... value mask:             %016Lx\n", x86_pmu.event_mask);
	pr_info("... max period:             %016Lx\n", x86_pmu.max_period);
	pr_info("... fixed-purpose events:   %d\n",     x86_pmu.num_events_fixed);
	pr_info("... event mask:             %016Lx\n", perf_event_mask);
}
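
/*
 * For illustration only (hypothetical dmesg output, not taken from a
 * real boot): on a Nehalem-class CPU with perfmon v3, 4 generic and
 * 3 fixed counters, the above would print roughly:
 *
 *	Performance Events: Nehalem/Corei7 events, Intel PMU driver.
 *	... version:                3
 *	... bit width:              48
 *	... generic registers:      4
 *	... value mask:             0000ffffffffffff
 *	... max period:             000000007fffffff
 *	... fixed-purpose events:   3
 *	... event mask:             000000070000000f
 */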

static inline void x86_pmu_read(struct perf_event *event)
{
	x86_perf_event_update(event, &event->hw, event->hw.idx);
}

static const struct pmu pmu = {
	.enable		= x86_pmu_enable,
	.disable	= x86_pmu_disable,
	.start		= x86_pmu_start,
	.stop		= x86_pmu_stop,
	.read		= x86_pmu_read,
	.unthrottle	= x86_pmu_unthrottle,
};

/*
 * validate a single event group
 *
 * validation includes:
 *	- check events are compatible with each other
 *	- events do not compete for the same counter
 *	- number of events <= number of counters
 *
 * validation ensures the group can be loaded onto the
 * PMU if it was the only group available.
 */
static int validate_group(struct perf_event *event)
{
	struct perf_event *leader = event->group_leader;
	struct cpu_hw_events *fake_cpuc;
	int ret, n;

	ret = -ENOMEM;
	fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
	if (!fake_cpuc)
		goto out;

	/*
	 * the event is not yet connected with its
	 * siblings, therefore we must first collect
	 * existing siblings, then add the new event
	 * before we can simulate the scheduling
	 */
	ret = -ENOSPC;
	n = collect_events(fake_cpuc, leader, true);
	if (n < 0)
		goto out_free;

	fake_cpuc->n_events = n;
	n = collect_events(fake_cpuc, event, false);
	if (n < 0)
		goto out_free;

	fake_cpuc->n_events = n;

	ret = x86_schedule_events(fake_cpuc, n, NULL);

out_free:
	kfree(fake_cpuc);
out:
	return ret;
}

const struct pmu *hw_perf_event_init(struct perf_event *event)
{
	const struct pmu *tmp;
	int err;

	err = __hw_perf_event_init(event);
	if (!err) {
		/*
		 * we temporarily connect the event to its pmu
		 * such that validate_group() can classify
		 * it as an x86 event using is_x86_event()
		 */
		tmp = event->pmu;
		event->pmu = &pmu;

		if (event->group_leader != event)
			err = validate_group(event);

		event->pmu = tmp;
	}
	if (err) {
		if (event->destroy)
			event->destroy(event);
		return ERR_PTR(err);
	}

	return &pmu;
}

/*
 * callchain support
 */

static inline
void callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
	if (entry->nr < PERF_MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;
}

static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);

static void
backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
	/* Ignore warnings */
}

static void backtrace_warning(void *data, char *msg)
{
	/* Ignore warnings */
}

static int backtrace_stack(void *data, char *name)
{
	return 0;
}

static void backtrace_address(void *data, unsigned long addr, int reliable)
{
	struct perf_callchain_entry *entry = data;

	if (reliable)
		callchain_store(entry, addr);
}

static const struct stacktrace_ops backtrace_ops = {
	.warning		= backtrace_warning,
	.warning_symbol		= backtrace_warning_symbol,
	.stack			= backtrace_stack,
	.address		= backtrace_address,
	.walk_stack		= print_context_stack_bp,
};

#include "../dumpstack.h"

static void
perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	callchain_store(entry, PERF_CONTEXT_KERNEL);
	callchain_store(entry, regs->ip);

	dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
}

/*
 * best effort, GUP based copy_from_user() that assumes IRQ or NMI context
 */
static unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
{
	unsigned long offset, addr = (unsigned long)from;
	int type = in_nmi() ? KM_NMI : KM_IRQ0;
	unsigned long size, len = 0;
	struct page *page;
	void *map;
	int ret;

	do {
		ret = __get_user_pages_fast(addr, 1, 0, &page);
		if (!ret)
			break;

		offset = addr & (PAGE_SIZE - 1);
		size = min(PAGE_SIZE - offset, n - len);

		map = kmap_atomic(page, type);
		memcpy(to, map+offset, size);
		kunmap_atomic(map, type);
		put_page(page);

		len  += size;
		to   += size;
		addr += size;

	} while (len < n);

	return len;
}
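
/*
 * Note: the loop above copies at page granularity, so a range that
 * straddles a page boundary works: e.g. 16 bytes starting 4 bytes
 * before the end of a page are copied as 4 bytes from the first page
 * and 12 from the next. If __get_user_pages_fast() fails (unmapped
 * address), the partial length copied so far is returned.
 */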

static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
{
	unsigned long bytes;

	bytes = copy_from_user_nmi(frame, fp, sizeof(*frame));

	return bytes == sizeof(*frame);
}

static void
perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	struct stack_frame frame;
	const void __user *fp;

	if (!user_mode(regs))
		regs = task_pt_regs(current);

	fp = (void __user *)regs->bp;

	callchain_store(entry, PERF_CONTEXT_USER);
	callchain_store(entry, regs->ip);

	while (entry->nr < PERF_MAX_STACK_DEPTH) {
		frame.next_frame	= NULL;
		frame.return_address	= 0;

		if (!copy_stack_frame(fp, &frame))
			break;

		if ((unsigned long)fp < regs->sp)
			break;

		callchain_store(entry, frame.return_address);
		fp = frame.next_frame;
	}
}
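
/*
 * Note: this walks the classic frame-pointer chain (saved bp +
 * return address), so user callchains are only meaningful for
 * binaries built with frame pointers (i.e. without
 * -fomit-frame-pointer).
 */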

static void
perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	int is_user;

	if (!regs)
		return;

	is_user = user_mode(regs);

	if (is_user && current->state != TASK_RUNNING)
		return;

	if (!is_user)
		perf_callchain_kernel(regs, entry);

	if (current->mm)
		perf_callchain_user(regs, entry);
}

struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
	struct perf_callchain_entry *entry;

	if (in_nmi())
		entry = &__get_cpu_var(pmc_nmi_entry);
	else
		entry = &__get_cpu_var(pmc_irq_entry);

	entry->nr = 0;

	perf_do_callchain(regs, entry);

	return entry;
}

void hw_perf_event_setup_online(int cpu)
{
	init_debug_store_on_cpu(cpu);
}