perf_event.c
  1. /*
  2. * Performance events x86 architecture code
  3. *
  4. * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
  5. * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
  6. * Copyright (C) 2009 Jaswinder Singh Rajput
  7. * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
  8. * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
  9. * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
  10. *
  11. * For licencing details see kernel-base/COPYING
  12. */
  13. #include <linux/perf_event.h>
  14. #include <linux/capability.h>
  15. #include <linux/notifier.h>
  16. #include <linux/hardirq.h>
  17. #include <linux/kprobes.h>
  18. #include <linux/module.h>
  19. #include <linux/kdebug.h>
  20. #include <linux/sched.h>
  21. #include <linux/uaccess.h>
  22. #include <linux/highmem.h>
  23. #include <linux/cpu.h>
  24. #include <asm/apic.h>
  25. #include <asm/stacktrace.h>
  26. #include <asm/nmi.h>
  27. static u64 perf_event_mask __read_mostly;
  28. /* The maximal number of PEBS events: */
  29. #define MAX_PEBS_EVENTS 4
  30. /* The size of a BTS record in bytes: */
  31. #define BTS_RECORD_SIZE 24
  32. /* The size of a per-cpu BTS buffer in bytes: */
  33. #define BTS_BUFFER_SIZE (BTS_RECORD_SIZE * 2048)
  34. /* The BTS overflow threshold in bytes from the end of the buffer: */
  35. #define BTS_OVFL_TH (BTS_RECORD_SIZE * 128)
  36. /*
  37. * Bits in the debugctlmsr controlling branch tracing.
  38. */
  39. #define X86_DEBUGCTL_TR (1 << 6)
  40. #define X86_DEBUGCTL_BTS (1 << 7)
  41. #define X86_DEBUGCTL_BTINT (1 << 8)
  42. #define X86_DEBUGCTL_BTS_OFF_OS (1 << 9)
  43. #define X86_DEBUGCTL_BTS_OFF_USR (1 << 10)
  44. /*
  45. * A debug store configuration.
  46. *
  47. * We only support architectures that use 64bit fields.
  48. */
  49. struct debug_store {
  50. u64 bts_buffer_base;
  51. u64 bts_index;
  52. u64 bts_absolute_maximum;
  53. u64 bts_interrupt_threshold;
  54. u64 pebs_buffer_base;
  55. u64 pebs_index;
  56. u64 pebs_absolute_maximum;
  57. u64 pebs_interrupt_threshold;
  58. u64 pebs_event_reset[MAX_PEBS_EVENTS];
  59. };
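/*
 * Illustrative note (not part of the original source): the BTS fields
 * describe a simple linear buffer. Each BTS record is three u64s
 * (from, to, flags), i.e. BTS_RECORD_SIZE == 24. The hardware appends
 * records at bts_index, must not pass bts_absolute_maximum, and signals
 * once bts_index crosses bts_interrupt_threshold, which
 * reserve_bts_hardware() below places BTS_OVFL_TH bytes before the end.
 */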
  60. struct cpu_hw_events {
  61. struct perf_event *events[X86_PMC_IDX_MAX];
  62. unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
  63. unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
  64. unsigned long interrupts;
  65. int enabled;
  66. struct debug_store *ds;
  67. };
  68. struct event_constraint {
  69. unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
  70. int code;
  71. };
  72. #define EVENT_CONSTRAINT(c, m) { .code = (c), .idxmsk[0] = (m) }
  73. #define EVENT_CONSTRAINT_END { .code = 0, .idxmsk[0] = 0 }
  74. #define for_each_event_constraint(e, c) \
  75. for ((e) = (c); (e)->idxmsk[0]; (e)++)
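/*
 * Example (illustrative, not from the original file): a constraint such
 * as EVENT_CONSTRAINT(0x12, 0x2) declares that event code 0x12 (MUL) may
 * only be scheduled on the counter whose bit is set in the index mask,
 * here generic counter 1. intel_get_event_idx() below walks such a table
 * with for_each_event_constraint() and stops at the EVENT_CONSTRAINT_END
 * sentinel, whose empty idxmsk[0] terminates the loop.
 */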
  76. /*
  77. * struct x86_pmu - generic x86 pmu
  78. */
  79. struct x86_pmu {
  80. const char *name;
  81. int version;
  82. int (*handle_irq)(struct pt_regs *);
  83. void (*disable_all)(void);
  84. void (*enable_all)(void);
  85. void (*enable)(struct hw_perf_event *, int);
  86. void (*disable)(struct hw_perf_event *, int);
  87. unsigned eventsel;
  88. unsigned perfctr;
  89. u64 (*event_map)(int);
  90. u64 (*raw_event)(u64);
  91. int max_events;
  92. int num_events;
  93. int num_events_fixed;
  94. int event_bits;
  95. u64 event_mask;
  96. int apic;
  97. u64 max_period;
  98. u64 intel_ctrl;
  99. void (*enable_bts)(u64 config);
  100. void (*disable_bts)(void);
  101. int (*get_event_idx)(struct cpu_hw_events *cpuc,
  102. struct hw_perf_event *hwc);
  103. };
  104. static struct x86_pmu x86_pmu __read_mostly;
  105. static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
  106. .enabled = 1,
  107. };
  108. static const struct event_constraint *event_constraints;
  109. /*
  110. * Not sure about some of these
  111. */
  112. static const u64 p6_perfmon_event_map[] =
  113. {
  114. [PERF_COUNT_HW_CPU_CYCLES] = 0x0079,
  115. [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
  116. [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0f2e,
  117. [PERF_COUNT_HW_CACHE_MISSES] = 0x012e,
  118. [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
  119. [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
  120. [PERF_COUNT_HW_BUS_CYCLES] = 0x0062,
  121. };
  122. static u64 p6_pmu_event_map(int hw_event)
  123. {
  124. return p6_perfmon_event_map[hw_event];
  125. }
  126. /*
  127. * Event setting that is specified not to count anything.
  128. * We use this to effectively disable a counter.
  129. *
  130. * L2_RQSTS with 0 MESI unit mask.
  131. */
  132. #define P6_NOP_EVENT 0x0000002EULL
  133. static u64 p6_pmu_raw_event(u64 hw_event)
  134. {
  135. #define P6_EVNTSEL_EVENT_MASK 0x000000FFULL
  136. #define P6_EVNTSEL_UNIT_MASK 0x0000FF00ULL
  137. #define P6_EVNTSEL_EDGE_MASK 0x00040000ULL
  138. #define P6_EVNTSEL_INV_MASK 0x00800000ULL
  139. #define P6_EVNTSEL_REG_MASK 0xFF000000ULL
  140. #define P6_EVNTSEL_MASK \
  141. (P6_EVNTSEL_EVENT_MASK | \
  142. P6_EVNTSEL_UNIT_MASK | \
  143. P6_EVNTSEL_EDGE_MASK | \
  144. P6_EVNTSEL_INV_MASK | \
  145. P6_EVNTSEL_REG_MASK)
  146. return hw_event & P6_EVNTSEL_MASK;
  147. }
  148. static const struct event_constraint intel_p6_event_constraints[] =
  149. {
  150. EVENT_CONSTRAINT(0xc1, 0x1), /* FLOPS */
  151. EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
  152. EVENT_CONSTRAINT(0x11, 0x1), /* FP_ASSIST */
  153. EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
  154. EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
  155. EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
  156. EVENT_CONSTRAINT_END
  157. };
  158. /*
  159. * Intel PerfMon v3. Used on Core2 and later.
  160. */
  161. static const u64 intel_perfmon_event_map[] =
  162. {
  163. [PERF_COUNT_HW_CPU_CYCLES] = 0x003c,
  164. [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
  165. [PERF_COUNT_HW_CACHE_REFERENCES] = 0x4f2e,
  166. [PERF_COUNT_HW_CACHE_MISSES] = 0x412e,
  167. [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
  168. [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
  169. [PERF_COUNT_HW_BUS_CYCLES] = 0x013c,
  170. };
  171. static const struct event_constraint intel_core_event_constraints[] =
  172. {
  173. EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
  174. EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
  175. EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
  176. EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
  177. EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
  178. EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
  179. EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
  180. EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
  181. EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
  182. EVENT_CONSTRAINT_END
  183. };
  184. static const struct event_constraint intel_nehalem_event_constraints[] =
  185. {
  186. EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
  187. EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
  188. EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
  189. EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
  190. EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
  191. EVENT_CONSTRAINT(0x4c, 0x3), /* LOAD_HIT_PRE */
  192. EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
  193. EVENT_CONSTRAINT(0x52, 0x3), /* L1D_CACHE_PREFETCH_LOCK_FB_HIT */
  194. EVENT_CONSTRAINT(0x53, 0x3), /* L1D_CACHE_LOCK_FB_HIT */
  195. EVENT_CONSTRAINT(0xc5, 0x3), /* CACHE_LOCK_CYCLES */
  196. EVENT_CONSTRAINT_END
  197. };
  198. static u64 intel_pmu_event_map(int hw_event)
  199. {
  200. return intel_perfmon_event_map[hw_event];
  201. }
  202. /*
  203. * Generalized hw caching related hw_event table, filled
  204. * in on a per model basis. A value of 0 means
  205. * 'not supported', -1 means 'hw_event makes no sense on
  206. * this CPU', any other value means the raw hw_event
  207. * ID.
  208. */
  209. #define C(x) PERF_COUNT_HW_CACHE_##x
  210. static u64 __read_mostly hw_cache_event_ids
  211. [PERF_COUNT_HW_CACHE_MAX]
  212. [PERF_COUNT_HW_CACHE_OP_MAX]
  213. [PERF_COUNT_HW_CACHE_RESULT_MAX];
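/*
 * Usage sketch (illustrative): set_ext_hw_attr() below decodes a
 * PERF_TYPE_HW_CACHE config into these three indices, e.g.
 *
 *	val = hw_cache_event_ids[C(L1D)][C(OP_READ)][C(RESULT_MISS)];
 *
 * which on Nehalem resolves to 0x0140 (L1D_CACHE_LD.I_STATE) once the
 * per-model table has been filled in at init time.
 */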
  214. static __initconst u64 nehalem_hw_cache_event_ids
  215. [PERF_COUNT_HW_CACHE_MAX]
  216. [PERF_COUNT_HW_CACHE_OP_MAX]
  217. [PERF_COUNT_HW_CACHE_RESULT_MAX] =
  218. {
  219. [ C(L1D) ] = {
  220. [ C(OP_READ) ] = {
  221. [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
  222. [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
  223. },
  224. [ C(OP_WRITE) ] = {
  225. [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
  226. [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
  227. },
  228. [ C(OP_PREFETCH) ] = {
  229. [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
  230. [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
  231. },
  232. },
  233. [ C(L1I ) ] = {
  234. [ C(OP_READ) ] = {
  235. [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
  236. [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
  237. },
  238. [ C(OP_WRITE) ] = {
  239. [ C(RESULT_ACCESS) ] = -1,
  240. [ C(RESULT_MISS) ] = -1,
  241. },
  242. [ C(OP_PREFETCH) ] = {
  243. [ C(RESULT_ACCESS) ] = 0x0,
  244. [ C(RESULT_MISS) ] = 0x0,
  245. },
  246. },
  247. [ C(LL ) ] = {
  248. [ C(OP_READ) ] = {
  249. [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
  250. [ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */
  251. },
  252. [ C(OP_WRITE) ] = {
  253. [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
  254. [ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */
  255. },
  256. [ C(OP_PREFETCH) ] = {
  257. [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
  258. [ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */
  259. },
  260. },
  261. [ C(DTLB) ] = {
  262. [ C(OP_READ) ] = {
  263. [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
  264. [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
  265. },
  266. [ C(OP_WRITE) ] = {
  267. [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
  268. [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
  269. },
  270. [ C(OP_PREFETCH) ] = {
  271. [ C(RESULT_ACCESS) ] = 0x0,
  272. [ C(RESULT_MISS) ] = 0x0,
  273. },
  274. },
  275. [ C(ITLB) ] = {
  276. [ C(OP_READ) ] = {
  277. [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
  278. [ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */
  279. },
  280. [ C(OP_WRITE) ] = {
  281. [ C(RESULT_ACCESS) ] = -1,
  282. [ C(RESULT_MISS) ] = -1,
  283. },
  284. [ C(OP_PREFETCH) ] = {
  285. [ C(RESULT_ACCESS) ] = -1,
  286. [ C(RESULT_MISS) ] = -1,
  287. },
  288. },
  289. [ C(BPU ) ] = {
  290. [ C(OP_READ) ] = {
  291. [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
  292. [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
  293. },
  294. [ C(OP_WRITE) ] = {
  295. [ C(RESULT_ACCESS) ] = -1,
  296. [ C(RESULT_MISS) ] = -1,
  297. },
  298. [ C(OP_PREFETCH) ] = {
  299. [ C(RESULT_ACCESS) ] = -1,
  300. [ C(RESULT_MISS) ] = -1,
  301. },
  302. },
  303. };
  304. static __initconst u64 core2_hw_cache_event_ids
  305. [PERF_COUNT_HW_CACHE_MAX]
  306. [PERF_COUNT_HW_CACHE_OP_MAX]
  307. [PERF_COUNT_HW_CACHE_RESULT_MAX] =
  308. {
  309. [ C(L1D) ] = {
  310. [ C(OP_READ) ] = {
  311. [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
  312. [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
  313. },
  314. [ C(OP_WRITE) ] = {
  315. [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
  316. [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
  317. },
  318. [ C(OP_PREFETCH) ] = {
  319. [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
  320. [ C(RESULT_MISS) ] = 0,
  321. },
  322. },
  323. [ C(L1I ) ] = {
  324. [ C(OP_READ) ] = {
  325. [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
  326. [ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */
  327. },
  328. [ C(OP_WRITE) ] = {
  329. [ C(RESULT_ACCESS) ] = -1,
  330. [ C(RESULT_MISS) ] = -1,
  331. },
  332. [ C(OP_PREFETCH) ] = {
  333. [ C(RESULT_ACCESS) ] = 0,
  334. [ C(RESULT_MISS) ] = 0,
  335. },
  336. },
  337. [ C(LL ) ] = {
  338. [ C(OP_READ) ] = {
  339. [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
  340. [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
  341. },
  342. [ C(OP_WRITE) ] = {
  343. [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
  344. [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
  345. },
  346. [ C(OP_PREFETCH) ] = {
  347. [ C(RESULT_ACCESS) ] = 0,
  348. [ C(RESULT_MISS) ] = 0,
  349. },
  350. },
  351. [ C(DTLB) ] = {
  352. [ C(OP_READ) ] = {
  353. [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
  354. [ C(RESULT_MISS) ] = 0x0208, /* DTLB_MISSES.MISS_LD */
  355. },
  356. [ C(OP_WRITE) ] = {
  357. [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
  358. [ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */
  359. },
  360. [ C(OP_PREFETCH) ] = {
  361. [ C(RESULT_ACCESS) ] = 0,
  362. [ C(RESULT_MISS) ] = 0,
  363. },
  364. },
  365. [ C(ITLB) ] = {
  366. [ C(OP_READ) ] = {
  367. [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
  368. [ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */
  369. },
  370. [ C(OP_WRITE) ] = {
  371. [ C(RESULT_ACCESS) ] = -1,
  372. [ C(RESULT_MISS) ] = -1,
  373. },
  374. [ C(OP_PREFETCH) ] = {
  375. [ C(RESULT_ACCESS) ] = -1,
  376. [ C(RESULT_MISS) ] = -1,
  377. },
  378. },
  379. [ C(BPU ) ] = {
  380. [ C(OP_READ) ] = {
  381. [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
  382. [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
  383. },
  384. [ C(OP_WRITE) ] = {
  385. [ C(RESULT_ACCESS) ] = -1,
  386. [ C(RESULT_MISS) ] = -1,
  387. },
  388. [ C(OP_PREFETCH) ] = {
  389. [ C(RESULT_ACCESS) ] = -1,
  390. [ C(RESULT_MISS) ] = -1,
  391. },
  392. },
  393. };
  394. static __initconst u64 atom_hw_cache_event_ids
  395. [PERF_COUNT_HW_CACHE_MAX]
  396. [PERF_COUNT_HW_CACHE_OP_MAX]
  397. [PERF_COUNT_HW_CACHE_RESULT_MAX] =
  398. {
  399. [ C(L1D) ] = {
  400. [ C(OP_READ) ] = {
  401. [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
  402. [ C(RESULT_MISS) ] = 0,
  403. },
  404. [ C(OP_WRITE) ] = {
  405. [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
  406. [ C(RESULT_MISS) ] = 0,
  407. },
  408. [ C(OP_PREFETCH) ] = {
  409. [ C(RESULT_ACCESS) ] = 0x0,
  410. [ C(RESULT_MISS) ] = 0,
  411. },
  412. },
  413. [ C(L1I ) ] = {
  414. [ C(OP_READ) ] = {
  415. [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
  416. [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
  417. },
  418. [ C(OP_WRITE) ] = {
  419. [ C(RESULT_ACCESS) ] = -1,
  420. [ C(RESULT_MISS) ] = -1,
  421. },
  422. [ C(OP_PREFETCH) ] = {
  423. [ C(RESULT_ACCESS) ] = 0,
  424. [ C(RESULT_MISS) ] = 0,
  425. },
  426. },
  427. [ C(LL ) ] = {
  428. [ C(OP_READ) ] = {
  429. [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
  430. [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
  431. },
  432. [ C(OP_WRITE) ] = {
  433. [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
  434. [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
  435. },
  436. [ C(OP_PREFETCH) ] = {
  437. [ C(RESULT_ACCESS) ] = 0,
  438. [ C(RESULT_MISS) ] = 0,
  439. },
  440. },
  441. [ C(DTLB) ] = {
  442. [ C(OP_READ) ] = {
  443. [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
  444. [ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */
  445. },
  446. [ C(OP_WRITE) ] = {
  447. [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
  448. [ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */
  449. },
  450. [ C(OP_PREFETCH) ] = {
  451. [ C(RESULT_ACCESS) ] = 0,
  452. [ C(RESULT_MISS) ] = 0,
  453. },
  454. },
  455. [ C(ITLB) ] = {
  456. [ C(OP_READ) ] = {
  457. [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
  458. [ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
  459. },
  460. [ C(OP_WRITE) ] = {
  461. [ C(RESULT_ACCESS) ] = -1,
  462. [ C(RESULT_MISS) ] = -1,
  463. },
  464. [ C(OP_PREFETCH) ] = {
  465. [ C(RESULT_ACCESS) ] = -1,
  466. [ C(RESULT_MISS) ] = -1,
  467. },
  468. },
  469. [ C(BPU ) ] = {
  470. [ C(OP_READ) ] = {
  471. [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
  472. [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
  473. },
  474. [ C(OP_WRITE) ] = {
  475. [ C(RESULT_ACCESS) ] = -1,
  476. [ C(RESULT_MISS) ] = -1,
  477. },
  478. [ C(OP_PREFETCH) ] = {
  479. [ C(RESULT_ACCESS) ] = -1,
  480. [ C(RESULT_MISS) ] = -1,
  481. },
  482. },
  483. };
  484. static u64 intel_pmu_raw_event(u64 hw_event)
  485. {
  486. #define CORE_EVNTSEL_EVENT_MASK 0x000000FFULL
  487. #define CORE_EVNTSEL_UNIT_MASK 0x0000FF00ULL
  488. #define CORE_EVNTSEL_EDGE_MASK 0x00040000ULL
  489. #define CORE_EVNTSEL_INV_MASK 0x00800000ULL
  490. #define CORE_EVNTSEL_REG_MASK 0xFF000000ULL
  491. #define CORE_EVNTSEL_MASK \
  492. (CORE_EVNTSEL_EVENT_MASK | \
  493. CORE_EVNTSEL_UNIT_MASK | \
  494. CORE_EVNTSEL_EDGE_MASK | \
  495. CORE_EVNTSEL_INV_MASK | \
  496. CORE_EVNTSEL_REG_MASK)
  497. return hw_event & CORE_EVNTSEL_MASK;
  498. }
  499. static __initconst u64 amd_hw_cache_event_ids
  500. [PERF_COUNT_HW_CACHE_MAX]
  501. [PERF_COUNT_HW_CACHE_OP_MAX]
  502. [PERF_COUNT_HW_CACHE_RESULT_MAX] =
  503. {
  504. [ C(L1D) ] = {
  505. [ C(OP_READ) ] = {
  506. [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
  507. [ C(RESULT_MISS) ] = 0x0041, /* Data Cache Misses */
  508. },
  509. [ C(OP_WRITE) ] = {
  510. [ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
  511. [ C(RESULT_MISS) ] = 0,
  512. },
  513. [ C(OP_PREFETCH) ] = {
  514. [ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts */
  515. [ C(RESULT_MISS) ] = 0x0167, /* Data Prefetcher :cancelled */
  516. },
  517. },
  518. [ C(L1I ) ] = {
  519. [ C(OP_READ) ] = {
  520. [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches */
  521. [ C(RESULT_MISS) ] = 0x0081, /* Instruction cache misses */
  522. },
  523. [ C(OP_WRITE) ] = {
  524. [ C(RESULT_ACCESS) ] = -1,
  525. [ C(RESULT_MISS) ] = -1,
  526. },
  527. [ C(OP_PREFETCH) ] = {
  528. [ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
  529. [ C(RESULT_MISS) ] = 0,
  530. },
  531. },
  532. [ C(LL ) ] = {
  533. [ C(OP_READ) ] = {
  534. [ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
  535. [ C(RESULT_MISS) ] = 0x037E, /* L2 Cache Misses : IC+DC */
  536. },
  537. [ C(OP_WRITE) ] = {
  538. [ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback */
  539. [ C(RESULT_MISS) ] = 0,
  540. },
  541. [ C(OP_PREFETCH) ] = {
  542. [ C(RESULT_ACCESS) ] = 0,
  543. [ C(RESULT_MISS) ] = 0,
  544. },
  545. },
  546. [ C(DTLB) ] = {
  547. [ C(OP_READ) ] = {
  548. [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
549. [ C(RESULT_MISS) ] = 0x0046, /* L1 DTLB and L2 DTLB Miss */
  550. },
  551. [ C(OP_WRITE) ] = {
  552. [ C(RESULT_ACCESS) ] = 0,
  553. [ C(RESULT_MISS) ] = 0,
  554. },
  555. [ C(OP_PREFETCH) ] = {
  556. [ C(RESULT_ACCESS) ] = 0,
  557. [ C(RESULT_MISS) ] = 0,
  558. },
  559. },
  560. [ C(ITLB) ] = {
  561. [ C(OP_READ) ] = {
562. [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches */
  563. [ C(RESULT_MISS) ] = 0x0085, /* Instr. fetch ITLB misses */
  564. },
  565. [ C(OP_WRITE) ] = {
  566. [ C(RESULT_ACCESS) ] = -1,
  567. [ C(RESULT_MISS) ] = -1,
  568. },
  569. [ C(OP_PREFETCH) ] = {
  570. [ C(RESULT_ACCESS) ] = -1,
  571. [ C(RESULT_MISS) ] = -1,
  572. },
  573. },
  574. [ C(BPU ) ] = {
  575. [ C(OP_READ) ] = {
  576. [ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr. */
  577. [ C(RESULT_MISS) ] = 0x00c3, /* Retired Mispredicted BI */
  578. },
  579. [ C(OP_WRITE) ] = {
  580. [ C(RESULT_ACCESS) ] = -1,
  581. [ C(RESULT_MISS) ] = -1,
  582. },
  583. [ C(OP_PREFETCH) ] = {
  584. [ C(RESULT_ACCESS) ] = -1,
  585. [ C(RESULT_MISS) ] = -1,
  586. },
  587. },
  588. };
  589. /*
  590. * AMD Performance Monitor K7 and later.
  591. */
  592. static const u64 amd_perfmon_event_map[] =
  593. {
  594. [PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
  595. [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
  596. [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0080,
  597. [PERF_COUNT_HW_CACHE_MISSES] = 0x0081,
  598. [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
  599. [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
  600. };
  601. static u64 amd_pmu_event_map(int hw_event)
  602. {
  603. return amd_perfmon_event_map[hw_event];
  604. }
  605. static u64 amd_pmu_raw_event(u64 hw_event)
  606. {
  607. #define K7_EVNTSEL_EVENT_MASK 0x7000000FFULL
  608. #define K7_EVNTSEL_UNIT_MASK 0x00000FF00ULL
  609. #define K7_EVNTSEL_EDGE_MASK 0x000040000ULL
  610. #define K7_EVNTSEL_INV_MASK 0x000800000ULL
  611. #define K7_EVNTSEL_REG_MASK 0x0FF000000ULL
  612. #define K7_EVNTSEL_MASK \
  613. (K7_EVNTSEL_EVENT_MASK | \
  614. K7_EVNTSEL_UNIT_MASK | \
  615. K7_EVNTSEL_EDGE_MASK | \
  616. K7_EVNTSEL_INV_MASK | \
  617. K7_EVNTSEL_REG_MASK)
  618. return hw_event & K7_EVNTSEL_MASK;
  619. }
  620. /*
  621. * Propagate event elapsed time into the generic event.
  622. * Can only be executed on the CPU where the event is active.
  623. * Returns the delta events processed.
  624. */
  625. static u64
  626. x86_perf_event_update(struct perf_event *event,
  627. struct hw_perf_event *hwc, int idx)
  628. {
  629. int shift = 64 - x86_pmu.event_bits;
  630. u64 prev_raw_count, new_raw_count;
  631. s64 delta;
  632. if (idx == X86_PMC_IDX_FIXED_BTS)
  633. return 0;
  634. /*
  635. * Careful: an NMI might modify the previous event value.
  636. *
  637. * Our tactic to handle this is to first atomically read and
  638. * exchange a new raw count - then add that new-prev delta
  639. * count to the generic event atomically:
  640. */
  641. again:
  642. prev_raw_count = atomic64_read(&hwc->prev_count);
  643. rdmsrl(hwc->event_base + idx, new_raw_count);
  644. if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
  645. new_raw_count) != prev_raw_count)
  646. goto again;
  647. /*
  648. * Now we have the new raw value and have updated the prev
  649. * timestamp already. We can now calculate the elapsed delta
  650. * (event-)time and add that to the generic event.
  651. *
  652. * Careful, not all hw sign-extends above the physical width
  653. * of the count.
  654. */
  655. delta = (new_raw_count << shift) - (prev_raw_count << shift);
  656. delta >>= shift;
  657. atomic64_add(delta, &event->count);
  658. atomic64_sub(delta, &hwc->period_left);
  659. return new_raw_count;
  660. }
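/*
 * Worked example (illustrative, assuming 40-bit counters so that
 * shift == 24): if prev_raw_count == 0xfffffffff0 and the counter
 * wrapped to new_raw_count == 0x10, shifting both counts up by 24 bits
 * places the 40-bit values in the top of a 64-bit word, so the
 * subtraction followed by the arithmetic shift back down yields
 * delta == +0x20 events rather than a huge negative number. This is
 * what the "not all hw sign-extends" comment above is guarding against.
 */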
  661. static atomic_t active_events;
  662. static DEFINE_MUTEX(pmc_reserve_mutex);
  663. static bool reserve_pmc_hardware(void)
  664. {
  665. #ifdef CONFIG_X86_LOCAL_APIC
  666. int i;
  667. if (nmi_watchdog == NMI_LOCAL_APIC)
  668. disable_lapic_nmi_watchdog();
  669. for (i = 0; i < x86_pmu.num_events; i++) {
  670. if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
  671. goto perfctr_fail;
  672. }
  673. for (i = 0; i < x86_pmu.num_events; i++) {
  674. if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
  675. goto eventsel_fail;
  676. }
  677. #endif
  678. return true;
  679. #ifdef CONFIG_X86_LOCAL_APIC
  680. eventsel_fail:
  681. for (i--; i >= 0; i--)
  682. release_evntsel_nmi(x86_pmu.eventsel + i);
  683. i = x86_pmu.num_events;
  684. perfctr_fail:
  685. for (i--; i >= 0; i--)
  686. release_perfctr_nmi(x86_pmu.perfctr + i);
  687. if (nmi_watchdog == NMI_LOCAL_APIC)
  688. enable_lapic_nmi_watchdog();
  689. return false;
  690. #endif
  691. }
  692. static void release_pmc_hardware(void)
  693. {
  694. #ifdef CONFIG_X86_LOCAL_APIC
  695. int i;
  696. for (i = 0; i < x86_pmu.num_events; i++) {
  697. release_perfctr_nmi(x86_pmu.perfctr + i);
  698. release_evntsel_nmi(x86_pmu.eventsel + i);
  699. }
  700. if (nmi_watchdog == NMI_LOCAL_APIC)
  701. enable_lapic_nmi_watchdog();
  702. #endif
  703. }
  704. static inline bool bts_available(void)
  705. {
  706. return x86_pmu.enable_bts != NULL;
  707. }
  708. static inline void init_debug_store_on_cpu(int cpu)
  709. {
  710. struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
  711. if (!ds)
  712. return;
  713. wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
  714. (u32)((u64)(unsigned long)ds),
  715. (u32)((u64)(unsigned long)ds >> 32));
  716. }
  717. static inline void fini_debug_store_on_cpu(int cpu)
  718. {
  719. if (!per_cpu(cpu_hw_events, cpu).ds)
  720. return;
  721. wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
  722. }
  723. static void release_bts_hardware(void)
  724. {
  725. int cpu;
  726. if (!bts_available())
  727. return;
  728. get_online_cpus();
  729. for_each_online_cpu(cpu)
  730. fini_debug_store_on_cpu(cpu);
  731. for_each_possible_cpu(cpu) {
  732. struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
  733. if (!ds)
  734. continue;
  735. per_cpu(cpu_hw_events, cpu).ds = NULL;
  736. kfree((void *)(unsigned long)ds->bts_buffer_base);
  737. kfree(ds);
  738. }
  739. put_online_cpus();
  740. }
  741. static int reserve_bts_hardware(void)
  742. {
  743. int cpu, err = 0;
  744. if (!bts_available())
  745. return 0;
  746. get_online_cpus();
  747. for_each_possible_cpu(cpu) {
  748. struct debug_store *ds;
  749. void *buffer;
  750. err = -ENOMEM;
  751. buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL);
  752. if (unlikely(!buffer))
  753. break;
  754. ds = kzalloc(sizeof(*ds), GFP_KERNEL);
  755. if (unlikely(!ds)) {
  756. kfree(buffer);
  757. break;
  758. }
  759. ds->bts_buffer_base = (u64)(unsigned long)buffer;
  760. ds->bts_index = ds->bts_buffer_base;
  761. ds->bts_absolute_maximum =
  762. ds->bts_buffer_base + BTS_BUFFER_SIZE;
  763. ds->bts_interrupt_threshold =
  764. ds->bts_absolute_maximum - BTS_OVFL_TH;
  765. per_cpu(cpu_hw_events, cpu).ds = ds;
  766. err = 0;
  767. }
  768. if (err)
  769. release_bts_hardware();
  770. else {
  771. for_each_online_cpu(cpu)
  772. init_debug_store_on_cpu(cpu);
  773. }
  774. put_online_cpus();
  775. return err;
  776. }
  777. static void hw_perf_event_destroy(struct perf_event *event)
  778. {
  779. if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
  780. release_pmc_hardware();
  781. release_bts_hardware();
  782. mutex_unlock(&pmc_reserve_mutex);
  783. }
  784. }
  785. static inline int x86_pmu_initialized(void)
  786. {
  787. return x86_pmu.handle_irq != NULL;
  788. }
  789. static inline int
  790. set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
  791. {
  792. unsigned int cache_type, cache_op, cache_result;
  793. u64 config, val;
  794. config = attr->config;
  795. cache_type = (config >> 0) & 0xff;
  796. if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
  797. return -EINVAL;
  798. cache_op = (config >> 8) & 0xff;
  799. if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
  800. return -EINVAL;
  801. cache_result = (config >> 16) & 0xff;
  802. if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
  803. return -EINVAL;
  804. val = hw_cache_event_ids[cache_type][cache_op][cache_result];
  805. if (val == 0)
  806. return -ENOENT;
  807. if (val == -1)
  808. return -EINVAL;
  809. hwc->config |= val;
  810. return 0;
  811. }
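/*
 * Example encoding (illustrative): a PERF_TYPE_HW_CACHE config packs the
 * three cache indices into one u64 as
 *
 *	config = (type) | (op << 8) | (result << 16);
 *
 * so an L1D read-miss event is C(L1D) | (C(OP_READ) << 8) |
 * (C(RESULT_MISS) << 16), which the function above unpacks, validates
 * against the *_MAX limits and translates via hw_cache_event_ids[][][].
 */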
  812. static void intel_pmu_enable_bts(u64 config)
  813. {
  814. unsigned long debugctlmsr;
  815. debugctlmsr = get_debugctlmsr();
  816. debugctlmsr |= X86_DEBUGCTL_TR;
  817. debugctlmsr |= X86_DEBUGCTL_BTS;
  818. debugctlmsr |= X86_DEBUGCTL_BTINT;
  819. if (!(config & ARCH_PERFMON_EVENTSEL_OS))
  820. debugctlmsr |= X86_DEBUGCTL_BTS_OFF_OS;
  821. if (!(config & ARCH_PERFMON_EVENTSEL_USR))
  822. debugctlmsr |= X86_DEBUGCTL_BTS_OFF_USR;
  823. update_debugctlmsr(debugctlmsr);
  824. }
  825. static void intel_pmu_disable_bts(void)
  826. {
  827. struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
  828. unsigned long debugctlmsr;
  829. if (!cpuc->ds)
  830. return;
  831. debugctlmsr = get_debugctlmsr();
  832. debugctlmsr &=
  833. ~(X86_DEBUGCTL_TR | X86_DEBUGCTL_BTS | X86_DEBUGCTL_BTINT |
  834. X86_DEBUGCTL_BTS_OFF_OS | X86_DEBUGCTL_BTS_OFF_USR);
  835. update_debugctlmsr(debugctlmsr);
  836. }
  837. /*
  838. * Setup the hardware configuration for a given attr_type
  839. */
  840. static int __hw_perf_event_init(struct perf_event *event)
  841. {
  842. struct perf_event_attr *attr = &event->attr;
  843. struct hw_perf_event *hwc = &event->hw;
  844. u64 config;
  845. int err;
  846. if (!x86_pmu_initialized())
  847. return -ENODEV;
  848. err = 0;
  849. if (!atomic_inc_not_zero(&active_events)) {
  850. mutex_lock(&pmc_reserve_mutex);
  851. if (atomic_read(&active_events) == 0) {
  852. if (!reserve_pmc_hardware())
  853. err = -EBUSY;
  854. else
  855. err = reserve_bts_hardware();
  856. }
  857. if (!err)
  858. atomic_inc(&active_events);
  859. mutex_unlock(&pmc_reserve_mutex);
  860. }
  861. if (err)
  862. return err;
  863. event->destroy = hw_perf_event_destroy;
  864. /*
  865. * Generate PMC IRQs:
  866. * (keep 'enabled' bit clear for now)
  867. */
  868. hwc->config = ARCH_PERFMON_EVENTSEL_INT;
  869. hwc->idx = -1;
  870. /*
  871. * Count user and OS events unless requested not to.
  872. */
  873. if (!attr->exclude_user)
  874. hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
  875. if (!attr->exclude_kernel)
  876. hwc->config |= ARCH_PERFMON_EVENTSEL_OS;
  877. if (!hwc->sample_period) {
  878. hwc->sample_period = x86_pmu.max_period;
  879. hwc->last_period = hwc->sample_period;
  880. atomic64_set(&hwc->period_left, hwc->sample_period);
  881. } else {
  882. /*
  883. * If we have a PMU initialized but no APIC
  884. * interrupts, we cannot sample hardware
  885. * events (user-space has to fall back and
  886. * sample via a hrtimer based software event):
  887. */
  888. if (!x86_pmu.apic)
  889. return -EOPNOTSUPP;
  890. }
  891. /*
892. * A raw hw_event type provides the config directly in the hw_event structure
  893. */
  894. if (attr->type == PERF_TYPE_RAW) {
  895. hwc->config |= x86_pmu.raw_event(attr->config);
  896. return 0;
  897. }
  898. if (attr->type == PERF_TYPE_HW_CACHE)
  899. return set_ext_hw_attr(hwc, attr);
  900. if (attr->config >= x86_pmu.max_events)
  901. return -EINVAL;
  902. /*
  903. * The generic map:
  904. */
  905. config = x86_pmu.event_map(attr->config);
  906. if (config == 0)
  907. return -ENOENT;
  908. if (config == -1LL)
  909. return -EINVAL;
  910. /*
  911. * Branch tracing:
  912. */
  913. if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
  914. (hwc->sample_period == 1)) {
  915. /* BTS is not supported by this architecture. */
  916. if (!bts_available())
  917. return -EOPNOTSUPP;
  918. /* BTS is currently only allowed for user-mode. */
  919. if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
  920. return -EOPNOTSUPP;
  921. }
  922. hwc->config |= config;
  923. return 0;
  924. }
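/*
 * End-to-end example (illustrative): for a PERF_TYPE_HARDWARE event with
 * attr.config == PERF_COUNT_HW_CPU_CYCLES and no exclude_* bits set, the
 * function above leaves hwc->config as
 *
 *	ARCH_PERFMON_EVENTSEL_INT | ARCH_PERFMON_EVENTSEL_USR |
 *	ARCH_PERFMON_EVENTSEL_OS  | x86_pmu.event_map(attr->config)
 *
 * i.e. the mapped event code 0x003c on Intel or 0x0076 on AMD, with the
 * enable bit still clear until x86_pmu.enable() is called.
 */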
  925. static void p6_pmu_disable_all(void)
  926. {
  927. struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
  928. u64 val;
  929. if (!cpuc->enabled)
  930. return;
  931. cpuc->enabled = 0;
  932. barrier();
  933. /* p6 only has one enable register */
  934. rdmsrl(MSR_P6_EVNTSEL0, val);
  935. val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
  936. wrmsrl(MSR_P6_EVNTSEL0, val);
  937. }
  938. static void intel_pmu_disable_all(void)
  939. {
  940. struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
  941. if (!cpuc->enabled)
  942. return;
  943. cpuc->enabled = 0;
  944. barrier();
  945. wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
  946. if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
  947. intel_pmu_disable_bts();
  948. }
  949. static void amd_pmu_disable_all(void)
  950. {
  951. struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
  952. int idx;
  953. if (!cpuc->enabled)
  954. return;
  955. cpuc->enabled = 0;
  956. /*
  957. * ensure we write the disable before we start disabling the
  958. * events proper, so that amd_pmu_enable_event() does the
  959. * right thing.
  960. */
  961. barrier();
  962. for (idx = 0; idx < x86_pmu.num_events; idx++) {
  963. u64 val;
  964. if (!test_bit(idx, cpuc->active_mask))
  965. continue;
  966. rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
  967. if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
  968. continue;
  969. val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
  970. wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
  971. }
  972. }
  973. void hw_perf_disable(void)
  974. {
  975. if (!x86_pmu_initialized())
  976. return;
  977. return x86_pmu.disable_all();
  978. }
  979. static void p6_pmu_enable_all(void)
  980. {
  981. struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
  982. unsigned long val;
  983. if (cpuc->enabled)
  984. return;
  985. cpuc->enabled = 1;
  986. barrier();
  987. /* p6 only has one enable register */
  988. rdmsrl(MSR_P6_EVNTSEL0, val);
  989. val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
  990. wrmsrl(MSR_P6_EVNTSEL0, val);
  991. }
  992. static void intel_pmu_enable_all(void)
  993. {
  994. struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
  995. if (cpuc->enabled)
  996. return;
  997. cpuc->enabled = 1;
  998. barrier();
  999. wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
  1000. if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
  1001. struct perf_event *event =
  1002. cpuc->events[X86_PMC_IDX_FIXED_BTS];
  1003. if (WARN_ON_ONCE(!event))
  1004. return;
  1005. intel_pmu_enable_bts(event->hw.config);
  1006. }
  1007. }
  1008. static void amd_pmu_enable_all(void)
  1009. {
  1010. struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
  1011. int idx;
  1012. if (cpuc->enabled)
  1013. return;
  1014. cpuc->enabled = 1;
  1015. barrier();
  1016. for (idx = 0; idx < x86_pmu.num_events; idx++) {
  1017. struct perf_event *event = cpuc->events[idx];
  1018. u64 val;
  1019. if (!test_bit(idx, cpuc->active_mask))
  1020. continue;
  1021. val = event->hw.config;
  1022. val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
  1023. wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
  1024. }
  1025. }
  1026. void hw_perf_enable(void)
  1027. {
  1028. if (!x86_pmu_initialized())
  1029. return;
  1030. x86_pmu.enable_all();
  1031. }
  1032. static inline u64 intel_pmu_get_status(void)
  1033. {
  1034. u64 status;
  1035. rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
  1036. return status;
  1037. }
  1038. static inline void intel_pmu_ack_status(u64 ack)
  1039. {
  1040. wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
  1041. }
  1042. static inline void x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
  1043. {
  1044. (void)checking_wrmsrl(hwc->config_base + idx,
  1045. hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
  1046. }
  1047. static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx)
  1048. {
  1049. (void)checking_wrmsrl(hwc->config_base + idx, hwc->config);
  1050. }
  1051. static inline void
  1052. intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx)
  1053. {
  1054. int idx = __idx - X86_PMC_IDX_FIXED;
  1055. u64 ctrl_val, mask;
  1056. mask = 0xfULL << (idx * 4);
  1057. rdmsrl(hwc->config_base, ctrl_val);
  1058. ctrl_val &= ~mask;
  1059. (void)checking_wrmsrl(hwc->config_base, ctrl_val);
  1060. }
  1061. static inline void
  1062. p6_pmu_disable_event(struct hw_perf_event *hwc, int idx)
  1063. {
  1064. struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
  1065. u64 val = P6_NOP_EVENT;
  1066. if (cpuc->enabled)
  1067. val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
  1068. (void)checking_wrmsrl(hwc->config_base + idx, val);
  1069. }
  1070. static inline void
  1071. intel_pmu_disable_event(struct hw_perf_event *hwc, int idx)
  1072. {
  1073. if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
  1074. intel_pmu_disable_bts();
  1075. return;
  1076. }
  1077. if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
  1078. intel_pmu_disable_fixed(hwc, idx);
  1079. return;
  1080. }
  1081. x86_pmu_disable_event(hwc, idx);
  1082. }
  1083. static inline void
  1084. amd_pmu_disable_event(struct hw_perf_event *hwc, int idx)
  1085. {
  1086. x86_pmu_disable_event(hwc, idx);
  1087. }
  1088. static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
  1089. /*
  1090. * Set the next IRQ period, based on the hwc->period_left value.
  1091. * To be called with the event disabled in hw:
  1092. */
  1093. static int
  1094. x86_perf_event_set_period(struct perf_event *event,
  1095. struct hw_perf_event *hwc, int idx)
  1096. {
  1097. s64 left = atomic64_read(&hwc->period_left);
  1098. s64 period = hwc->sample_period;
  1099. int err, ret = 0;
  1100. if (idx == X86_PMC_IDX_FIXED_BTS)
  1101. return 0;
  1102. /*
  1103. * If we are way outside a reasonable range then just skip forward:
  1104. */
  1105. if (unlikely(left <= -period)) {
  1106. left = period;
  1107. atomic64_set(&hwc->period_left, left);
  1108. hwc->last_period = period;
  1109. ret = 1;
  1110. }
  1111. if (unlikely(left <= 0)) {
  1112. left += period;
  1113. atomic64_set(&hwc->period_left, left);
  1114. hwc->last_period = period;
  1115. ret = 1;
  1116. }
  1117. /*
1118. * Quirk: certain CPUs don't like it if just 1 hw_event is left:
  1119. */
  1120. if (unlikely(left < 2))
  1121. left = 2;
  1122. if (left > x86_pmu.max_period)
  1123. left = x86_pmu.max_period;
  1124. per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
  1125. /*
  1126. * The hw event starts counting from this event offset,
1127. * mark it so we can extract future deltas:
  1128. */
  1129. atomic64_set(&hwc->prev_count, (u64)-left);
  1130. err = checking_wrmsrl(hwc->event_base + idx,
  1131. (u64)(-left) & x86_pmu.event_mask);
  1132. perf_event_update_userpage(event);
  1133. return ret;
  1134. }
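/*
 * Illustrative example: with a fresh sample_period of 100000 the counter
 * is programmed to (u64)-100000 (masked to the counter width), so it
 * overflows and raises the PMI after exactly 100000 increments.
 * period_left tracks how much of the current period is still outstanding
 * when the event is scheduled out before it overflows.
 */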
  1135. static inline void
  1136. intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
  1137. {
  1138. int idx = __idx - X86_PMC_IDX_FIXED;
  1139. u64 ctrl_val, bits, mask;
  1140. int err;
  1141. /*
  1142. * Enable IRQ generation (0x8),
  1143. * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
  1144. * if requested:
  1145. */
  1146. bits = 0x8ULL;
  1147. if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
  1148. bits |= 0x2;
  1149. if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
  1150. bits |= 0x1;
  1151. bits <<= (idx * 4);
  1152. mask = 0xfULL << (idx * 4);
  1153. rdmsrl(hwc->config_base, ctrl_val);
  1154. ctrl_val &= ~mask;
  1155. ctrl_val |= bits;
  1156. err = checking_wrmsrl(hwc->config_base, ctrl_val);
  1157. }
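/*
 * Example (illustrative): each fixed counter owns a 4-bit field in
 * MSR_ARCH_PERFMON_FIXED_CTR_CTRL. For idx == 1 (fixed counter 1,
 * CPU cycles) with both USR and OS counting requested, bits == 0xb and
 * the read-modify-write above clears the old 0xf0 field and installs
 * 0xb0, leaving the fields of the other fixed counters untouched.
 */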
  1158. static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx)
  1159. {
  1160. struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
  1161. u64 val;
  1162. val = hwc->config;
  1163. if (cpuc->enabled)
  1164. val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
  1165. (void)checking_wrmsrl(hwc->config_base + idx, val);
  1166. }
  1167. static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
  1168. {
  1169. if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
  1170. if (!__get_cpu_var(cpu_hw_events).enabled)
  1171. return;
  1172. intel_pmu_enable_bts(hwc->config);
  1173. return;
  1174. }
  1175. if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
  1176. intel_pmu_enable_fixed(hwc, idx);
  1177. return;
  1178. }
  1179. x86_pmu_enable_event(hwc, idx);
  1180. }
  1181. static void amd_pmu_enable_event(struct hw_perf_event *hwc, int idx)
  1182. {
  1183. struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
  1184. if (cpuc->enabled)
  1185. x86_pmu_enable_event(hwc, idx);
  1186. }
  1187. static int fixed_mode_idx(struct hw_perf_event *hwc)
  1188. {
  1189. unsigned int hw_event;
  1190. hw_event = hwc->config & ARCH_PERFMON_EVENT_MASK;
  1191. if (unlikely((hw_event ==
  1192. x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) &&
  1193. (hwc->sample_period == 1)))
  1194. return X86_PMC_IDX_FIXED_BTS;
  1195. if (!x86_pmu.num_events_fixed)
  1196. return -1;
  1197. /*
  1198. * fixed counters do not take all possible filters
  1199. */
  1200. if (hwc->config & ARCH_PERFMON_EVENT_FILTER_MASK)
  1201. return -1;
  1202. if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS)))
  1203. return X86_PMC_IDX_FIXED_INSTRUCTIONS;
  1204. if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES)))
  1205. return X86_PMC_IDX_FIXED_CPU_CYCLES;
  1206. if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_BUS_CYCLES)))
  1207. return X86_PMC_IDX_FIXED_BUS_CYCLES;
  1208. return -1;
  1209. }
  1210. /*
  1211. * generic counter allocator: get next free counter
  1212. */
  1213. static int
  1214. gen_get_event_idx(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
  1215. {
  1216. int idx;
  1217. idx = find_first_zero_bit(cpuc->used_mask, x86_pmu.num_events);
  1218. return idx == x86_pmu.num_events ? -1 : idx;
  1219. }
  1220. /*
  1221. * intel-specific counter allocator: check event constraints
  1222. */
  1223. static int
  1224. intel_get_event_idx(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
  1225. {
  1226. const struct event_constraint *event_constraint;
  1227. int i, code;
  1228. if (!event_constraints)
  1229. goto skip;
  1230. code = hwc->config & CORE_EVNTSEL_EVENT_MASK;
  1231. for_each_event_constraint(event_constraint, event_constraints) {
  1232. if (code == event_constraint->code) {
  1233. for_each_bit(i, event_constraint->idxmsk, X86_PMC_IDX_MAX) {
  1234. if (!test_and_set_bit(i, cpuc->used_mask))
  1235. return i;
  1236. }
  1237. return -1;
  1238. }
  1239. }
  1240. skip:
  1241. return gen_get_event_idx(cpuc, hwc);
  1242. }
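/*
 * Example (illustrative): on Core2, MEM_LOAD_RETIRED (0xcb) carries the
 * constraint EVENT_CONSTRAINT(0xcb, 0x1), so the loop above only tries
 * generic counter 0 and returns -1 if that counter is already taken;
 * unconstrained events fall through to gen_get_event_idx() and may use
 * any free generic counter.
 */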
  1243. static int
  1244. x86_schedule_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
  1245. {
  1246. int idx;
  1247. idx = fixed_mode_idx(hwc);
  1248. if (idx == X86_PMC_IDX_FIXED_BTS) {
  1249. /* BTS is already occupied. */
  1250. if (test_and_set_bit(idx, cpuc->used_mask))
  1251. return -EAGAIN;
  1252. hwc->config_base = 0;
  1253. hwc->event_base = 0;
  1254. hwc->idx = idx;
  1255. } else if (idx >= 0) {
  1256. /*
  1257. * Try to get the fixed event, if that is already taken
  1258. * then try to get a generic event:
  1259. */
  1260. if (test_and_set_bit(idx, cpuc->used_mask))
  1261. goto try_generic;
  1262. hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
  1263. /*
  1264. * We set it so that event_base + idx in wrmsr/rdmsr maps to
  1265. * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
  1266. */
  1267. hwc->event_base =
  1268. MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
  1269. hwc->idx = idx;
  1270. } else {
  1271. idx = hwc->idx;
  1272. /* Try to get the previous generic event again */
  1273. if (idx == -1 || test_and_set_bit(idx, cpuc->used_mask)) {
  1274. try_generic:
  1275. idx = x86_pmu.get_event_idx(cpuc, hwc);
  1276. if (idx == -1)
  1277. return -EAGAIN;
  1278. set_bit(idx, cpuc->used_mask);
  1279. hwc->idx = idx;
  1280. }
  1281. hwc->config_base = x86_pmu.eventsel;
  1282. hwc->event_base = x86_pmu.perfctr;
  1283. }
  1284. return idx;
  1285. }
  1286. /*
  1287. * Find a PMC slot for the freshly enabled / scheduled in event:
  1288. */
  1289. static int x86_pmu_enable(struct perf_event *event)
  1290. {
  1291. struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
  1292. struct hw_perf_event *hwc = &event->hw;
  1293. int idx;
  1294. idx = x86_schedule_event(cpuc, hwc);
  1295. if (idx < 0)
  1296. return idx;
  1297. perf_events_lapic_init();
  1298. x86_pmu.disable(hwc, idx);
  1299. cpuc->events[idx] = event;
  1300. set_bit(idx, cpuc->active_mask);
  1301. x86_perf_event_set_period(event, hwc, idx);
  1302. x86_pmu.enable(hwc, idx);
  1303. perf_event_update_userpage(event);
  1304. return 0;
  1305. }
  1306. static void x86_pmu_unthrottle(struct perf_event *event)
  1307. {
  1308. struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
  1309. struct hw_perf_event *hwc = &event->hw;
  1310. if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX ||
  1311. cpuc->events[hwc->idx] != event))
  1312. return;
  1313. x86_pmu.enable(hwc, hwc->idx);
  1314. }
  1315. void perf_event_print_debug(void)
  1316. {
  1317. u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
  1318. struct cpu_hw_events *cpuc;
  1319. unsigned long flags;
  1320. int cpu, idx;
  1321. if (!x86_pmu.num_events)
  1322. return;
  1323. local_irq_save(flags);
  1324. cpu = smp_processor_id();
  1325. cpuc = &per_cpu(cpu_hw_events, cpu);
  1326. if (x86_pmu.version >= 2) {
  1327. rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
  1328. rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
  1329. rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
  1330. rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
  1331. pr_info("\n");
  1332. pr_info("CPU#%d: ctrl: %016llx\n", cpu, ctrl);
  1333. pr_info("CPU#%d: status: %016llx\n", cpu, status);
  1334. pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow);
  1335. pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed);
  1336. }
  1337. pr_info("CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used_mask);
  1338. for (idx = 0; idx < x86_pmu.num_events; idx++) {
  1339. rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
  1340. rdmsrl(x86_pmu.perfctr + idx, pmc_count);
  1341. prev_left = per_cpu(pmc_prev_left[idx], cpu);
  1342. pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n",
  1343. cpu, idx, pmc_ctrl);
  1344. pr_info("CPU#%d: gen-PMC%d count: %016llx\n",
  1345. cpu, idx, pmc_count);
  1346. pr_info("CPU#%d: gen-PMC%d left: %016llx\n",
  1347. cpu, idx, prev_left);
  1348. }
  1349. for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
  1350. rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
  1351. pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
  1352. cpu, idx, pmc_count);
  1353. }
  1354. local_irq_restore(flags);
  1355. }
  1356. static void intel_pmu_drain_bts_buffer(struct cpu_hw_events *cpuc)
  1357. {
  1358. struct debug_store *ds = cpuc->ds;
  1359. struct bts_record {
  1360. u64 from;
  1361. u64 to;
  1362. u64 flags;
  1363. };
  1364. struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
  1365. struct bts_record *at, *top;
  1366. struct perf_output_handle handle;
  1367. struct perf_event_header header;
  1368. struct perf_sample_data data;
  1369. struct pt_regs regs;
  1370. if (!event)
  1371. return;
  1372. if (!ds)
  1373. return;
  1374. at = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
  1375. top = (struct bts_record *)(unsigned long)ds->bts_index;
  1376. if (top <= at)
  1377. return;
  1378. ds->bts_index = ds->bts_buffer_base;
  1379. data.period = event->hw.last_period;
  1380. data.addr = 0;
  1381. data.raw = NULL;
  1382. regs.ip = 0;
  1383. /*
  1384. * Prepare a generic sample, i.e. fill in the invariant fields.
  1385. * We will overwrite the from and to address before we output
  1386. * the sample.
  1387. */
  1388. perf_prepare_sample(&header, &data, event, &regs);
  1389. if (perf_output_begin(&handle, event,
  1390. header.size * (top - at), 1, 1))
  1391. return;
  1392. for (; at < top; at++) {
  1393. data.ip = at->from;
  1394. data.addr = at->to;
  1395. perf_output_sample(&handle, &header, &data, event);
  1396. }
  1397. perf_output_end(&handle);
  1398. /* There's new data available. */
  1399. event->hw.interrupts++;
  1400. event->pending_kill = POLL_IN;
  1401. }
  1402. static void x86_pmu_disable(struct perf_event *event)
  1403. {
  1404. struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
  1405. struct hw_perf_event *hwc = &event->hw;
  1406. int idx = hwc->idx;
  1407. /*
  1408. * Must be done before we disable, otherwise the nmi handler
  1409. * could reenable again:
  1410. */
  1411. clear_bit(idx, cpuc->active_mask);
  1412. x86_pmu.disable(hwc, idx);
  1413. /*
  1414. * Make sure the cleared pointer becomes visible before we
  1415. * (potentially) free the event:
  1416. */
  1417. barrier();
  1418. /*
  1419. * Drain the remaining delta count out of a event
  1420. * that we are disabling:
  1421. */
  1422. x86_perf_event_update(event, hwc, idx);
  1423. /* Drain the remaining BTS records. */
  1424. if (unlikely(idx == X86_PMC_IDX_FIXED_BTS))
  1425. intel_pmu_drain_bts_buffer(cpuc);
  1426. cpuc->events[idx] = NULL;
  1427. clear_bit(idx, cpuc->used_mask);
  1428. perf_event_update_userpage(event);
  1429. }
/*
 * Save and restart an expired event. Called by NMI contexts,
 * so it has to be careful about preempting normal event ops:
 */
static int intel_pmu_save_and_restart(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	int ret;

	x86_perf_event_update(event, hwc, idx);
	ret = x86_perf_event_set_period(event, hwc, idx);

	if (event->state == PERF_EVENT_STATE_ACTIVE)
		intel_pmu_enable_event(hwc, idx);

	return ret;
}

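/*
 * Last-resort cleanup: zero every generic and fixed counter control/count
 * MSR and discard any pending BTS records. Used when the interrupt handler
 * detects that it is stuck in a loop.
 */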
static void intel_pmu_reset(void)
{
	struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds;
	unsigned long flags;
	int idx;

	if (!x86_pmu.num_events)
		return;

	local_irq_save(flags);

	printk("clearing PMU state on CPU#%d\n", smp_processor_id());

	for (idx = 0; idx < x86_pmu.num_events; idx++) {
		checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
		checking_wrmsrl(x86_pmu.perfctr + idx, 0ull);
	}
	for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
		checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
	}
	if (ds)
		ds->bts_index = ds->bts_buffer_base;

	local_irq_restore(flags);
}

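/*
 * P6-style overflow handling: there is no global overflow status register,
 * so every active counter is re-read. A counter has overflowed when the
 * sign bit of its value is clear, since counters are programmed with the
 * negated sample period and count up through zero.
 */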
static int p6_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct perf_event *event;
	struct hw_perf_event *hwc;
	int idx, handled = 0;
	u64 val;

	data.addr = 0;
	data.raw = NULL;

	cpuc = &__get_cpu_var(cpu_hw_events);

	for (idx = 0; idx < x86_pmu.num_events; idx++) {
		if (!test_bit(idx, cpuc->active_mask))
			continue;

		event = cpuc->events[idx];
		hwc = &event->hw;

		val = x86_perf_event_update(event, hwc, idx);
		if (val & (1ULL << (x86_pmu.event_bits - 1)))
			continue;

		/*
		 * event overflow
		 */
		handled = 1;
		data.period = event->hw.last_period;

		if (!x86_perf_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, 1, &data, regs))
			p6_pmu_disable_event(hwc, idx);
	}

	if (handled)
		inc_irq_stat(apic_perf_irqs);

	return handled;
}

/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static int intel_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	int bit, loops;
	u64 ack, status;

	data.addr = 0;
	data.raw = NULL;

	cpuc = &__get_cpu_var(cpu_hw_events);

	perf_disable();
	intel_pmu_drain_bts_buffer(cpuc);
	status = intel_pmu_get_status();
	if (!status) {
		perf_enable();
		return 0;
	}

	loops = 0;
again:
	if (++loops > 100) {
		WARN_ONCE(1, "perfevents: irq loop stuck!\n");
		perf_event_print_debug();
		intel_pmu_reset();
		perf_enable();
		return 1;
	}

	inc_irq_stat(apic_perf_irqs);
	ack = status;
	for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
		struct perf_event *event = cpuc->events[bit];

		clear_bit(bit, (unsigned long *) &status);
		if (!test_bit(bit, cpuc->active_mask))
			continue;

		if (!intel_pmu_save_and_restart(event))
			continue;

		data.period = event->hw.last_period;

		if (perf_event_overflow(event, 1, &data, regs))
			intel_pmu_disable_event(&event->hw, bit);
	}

	intel_pmu_ack_status(ack);

	/*
	 * Repeat if there is more work to be done:
	 */
	status = intel_pmu_get_status();
	if (status)
		goto again;

	perf_enable();

	return 1;
}

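/*
 * AMD overflow handling mirrors the P6 path above: with no global overflow
 * status register, every active counter is re-read and checked for a
 * cleared sign bit.
 */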
static int amd_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct perf_event *event;
	struct hw_perf_event *hwc;
	int idx, handled = 0;
	u64 val;

	data.addr = 0;
	data.raw = NULL;

	cpuc = &__get_cpu_var(cpu_hw_events);

	for (idx = 0; idx < x86_pmu.num_events; idx++) {
		if (!test_bit(idx, cpuc->active_mask))
			continue;

		event = cpuc->events[idx];
		hwc = &event->hw;

		val = x86_perf_event_update(event, hwc, idx);
		if (val & (1ULL << (x86_pmu.event_bits - 1)))
			continue;

		/*
		 * event overflow
		 */
		handled = 1;
		data.period = event->hw.last_period;

		if (!x86_perf_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, 1, &data, regs))
			amd_pmu_disable_event(hwc, idx);
	}

	if (handled)
		inc_irq_stat(apic_perf_irqs);

	return handled;
}

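/*
 * Deferred-work path: work that cannot be done from NMI context is kicked
 * over to this vector via a self-IPI (set_perf_event_pending) and handled
 * by perf_event_do_pending() in normal interrupt context.
 */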
void smp_perf_pending_interrupt(struct pt_regs *regs)
{
	irq_enter();
	ack_APIC_irq();
	inc_irq_stat(apic_pending_irqs);
	perf_event_do_pending();
	irq_exit();
}

void set_perf_event_pending(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
	if (!x86_pmu.apic || !x86_pmu_initialized())
		return;

	apic->send_IPI_self(LOCAL_PENDING_VECTOR);
#endif
}

void perf_events_lapic_init(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
	if (!x86_pmu.apic || !x86_pmu_initialized())
		return;

	/*
	 * Always use NMI for PMU
	 */
	apic_write(APIC_LVTPC, APIC_DM_NMI);
#endif
}

static int __kprobes
perf_event_nmi_handler(struct notifier_block *self,
			 unsigned long cmd, void *__args)
{
	struct die_args *args = __args;
	struct pt_regs *regs;

	if (!atomic_read(&active_events))
		return NOTIFY_DONE;

	switch (cmd) {
	case DIE_NMI:
	case DIE_NMI_IPI:
		break;
	default:
		return NOTIFY_DONE;
	}

	regs = args->regs;

#ifdef CONFIG_X86_LOCAL_APIC
	apic_write(APIC_LVTPC, APIC_DM_NMI);
#endif
	/*
	 * Can't rely on the handled return value to say it was our NMI, two
	 * events could trigger 'simultaneously' raising two back-to-back NMIs.
	 *
	 * If the first NMI handles both, the latter will be empty and daze
	 * the CPU.
	 */
	x86_pmu.handle_irq(regs);

	return NOTIFY_STOP;
}

static __read_mostly struct notifier_block perf_event_nmi_notifier = {
	.notifier_call	= perf_event_nmi_handler,
	.next		= NULL,
	.priority	= 1
};

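/*
 * Per-vendor x86_pmu templates. One of these is copied into the global
 * x86_pmu at boot by the matching *_pmu_init() routine below.
 */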
static __initconst struct x86_pmu p6_pmu = {
	.name			= "p6",
	.handle_irq		= p6_pmu_handle_irq,
	.disable_all		= p6_pmu_disable_all,
	.enable_all		= p6_pmu_enable_all,
	.enable			= p6_pmu_enable_event,
	.disable		= p6_pmu_disable_event,
	.eventsel		= MSR_P6_EVNTSEL0,
	.perfctr		= MSR_P6_PERFCTR0,
	.event_map		= p6_pmu_event_map,
	.raw_event		= p6_pmu_raw_event,
	.max_events		= ARRAY_SIZE(p6_perfmon_event_map),
	.apic			= 1,
	.max_period		= (1ULL << 31) - 1,
	.version		= 0,
	.num_events		= 2,
	/*
	 * Events have 40 bits implemented. However they are designed such
	 * that bits [32-39] are sign extensions of bit 31. As such the
	 * effective width of an event for P6-like PMUs is 32 bits only.
	 *
	 * See IA-32 Intel Architecture Software Developer's Manual, Vol 3B.
	 */
	.event_bits		= 32,
	.event_mask		= (1ULL << 32) - 1,
	.get_event_idx		= intel_get_event_idx,
};

static __initconst struct x86_pmu intel_pmu = {
	.name			= "Intel",
	.handle_irq		= intel_pmu_handle_irq,
	.disable_all		= intel_pmu_disable_all,
	.enable_all		= intel_pmu_enable_all,
	.enable			= intel_pmu_enable_event,
	.disable		= intel_pmu_disable_event,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.raw_event		= intel_pmu_raw_event,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
	.apic			= 1,
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic event period:
	 */
	.max_period		= (1ULL << 31) - 1,
	.enable_bts		= intel_pmu_enable_bts,
	.disable_bts		= intel_pmu_disable_bts,
	.get_event_idx		= intel_get_event_idx,
};

static __initconst struct x86_pmu amd_pmu = {
	.name			= "AMD",
	.handle_irq		= amd_pmu_handle_irq,
	.disable_all		= amd_pmu_disable_all,
	.enable_all		= amd_pmu_enable_all,
	.enable			= amd_pmu_enable_event,
	.disable		= amd_pmu_disable_event,
	.eventsel		= MSR_K7_EVNTSEL0,
	.perfctr		= MSR_K7_PERFCTR0,
	.event_map		= amd_pmu_event_map,
	.raw_event		= amd_pmu_raw_event,
	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
	.num_events		= 4,
	.event_bits		= 48,
	.event_mask		= (1ULL << 48) - 1,
	.apic			= 1,
	/* use highest bit to detect overflow */
	.max_period		= (1ULL << 47) - 1,
	.get_event_idx		= gen_get_event_idx,
};

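/*
 * Probe P6-family (family 6, pre-architectural-perfmon) models and install
 * the p6 PMU template along with the model-specific event constraints.
 */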
static __init int p6_pmu_init(void)
{
	switch (boot_cpu_data.x86_model) {
	case 1:
	case 3:  /* Pentium Pro */
	case 5:
	case 6:  /* Pentium II */
	case 7:
	case 8:
	case 11: /* Pentium III */
		event_constraints = intel_p6_event_constraints;
		break;
	case 9:
	case 13:
		/* Pentium M */
		event_constraints = intel_p6_event_constraints;
		break;
	default:
		pr_cont("unsupported p6 CPU model %d ",
			boot_cpu_data.x86_model);
		return -ENODEV;
	}

	x86_pmu = p6_pmu;

	return 0;
}

static __init int intel_pmu_init(void)
{
	union cpuid10_edx edx;
	union cpuid10_eax eax;
	unsigned int unused;
	unsigned int ebx;
	int version;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
		/* check for P6 processor family */
		if (boot_cpu_data.x86 == 6) {
			return p6_pmu_init();
		} else {
			return -ENODEV;
		}
	}

	/*
	 * Check whether the Architectural PerfMon supports
	 * Branch Misses Retired hw_event or not.
	 */
	cpuid(10, &eax.full, &ebx, &unused, &edx.full);
	if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
		return -ENODEV;

	version = eax.split.version_id;
	if (version < 2)
		return -ENODEV;

	x86_pmu = intel_pmu;
	x86_pmu.version = version;
	x86_pmu.num_events = eax.split.num_events;
	x86_pmu.event_bits = eax.split.bit_width;
	x86_pmu.event_mask = (1ULL << eax.split.bit_width) - 1;

	/*
	 * Quirk: v2 perfmon does not report fixed-purpose events, so
	 * assume at least 3 events:
	 */
	x86_pmu.num_events_fixed = max((int)edx.split.num_events_fixed, 3);

	/*
	 * Install the hw-cache-events table:
	 */
	switch (boot_cpu_data.x86_model) {
	case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
	case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
	case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
	case 29: /* six-core 45 nm xeon "Dunnington" */
		memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		pr_cont("Core2 events, ");
		event_constraints = intel_core_event_constraints;
		break;
	default:
	case 26:
		memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		event_constraints = intel_nehalem_event_constraints;
		pr_cont("Nehalem/Corei7 events, ");
		break;
	case 28:
		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		pr_cont("Atom events, ");
		break;
	}

	return 0;
}

static __init int amd_pmu_init(void)
{
	/* Performance-monitoring supported from K7 and later: */
	if (boot_cpu_data.x86 < 6)
		return -ENODEV;

	x86_pmu = amd_pmu;

	/* Events are common for all AMDs */
	memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
	       sizeof(hw_cache_event_ids));

	return 0;
}

static void __init pmu_check_apic(void)
{
	if (cpu_has_apic)
		return;

	x86_pmu.apic = 0;
	pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
	pr_info("no hardware sampling interrupt available.\n");
}

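/*
 * Boot-time entry point: pick the vendor PMU driver, clip the counter
 * counts to the generic maxima, build perf_event_mask and hook up the
 * NMI notifier.
 */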
void __init init_hw_perf_events(void)
{
	int err;

	pr_info("Performance Events: ");

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		err = intel_pmu_init();
		break;
	case X86_VENDOR_AMD:
		err = amd_pmu_init();
		break;
	default:
		return;
	}
	if (err != 0) {
		pr_cont("no PMU driver, software events only.\n");
		return;
	}

	pmu_check_apic();

	pr_cont("%s PMU driver.\n", x86_pmu.name);

	if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) {
		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
		     x86_pmu.num_events, X86_PMC_MAX_GENERIC);
		x86_pmu.num_events = X86_PMC_MAX_GENERIC;
	}
	perf_event_mask = (1 << x86_pmu.num_events) - 1;
	perf_max_events = x86_pmu.num_events;

	if (x86_pmu.num_events_fixed > X86_PMC_MAX_FIXED) {
		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
		     x86_pmu.num_events_fixed, X86_PMC_MAX_FIXED);
		x86_pmu.num_events_fixed = X86_PMC_MAX_FIXED;
	}

	perf_event_mask |=
		((1LL << x86_pmu.num_events_fixed)-1) << X86_PMC_IDX_FIXED;
	x86_pmu.intel_ctrl = perf_event_mask;

	perf_events_lapic_init();
	register_die_notifier(&perf_event_nmi_notifier);

	pr_info("... version: %d\n", x86_pmu.version);
	pr_info("... bit width: %d\n", x86_pmu.event_bits);
	pr_info("... generic registers: %d\n", x86_pmu.num_events);
	pr_info("... value mask: %016Lx\n", x86_pmu.event_mask);
	pr_info("... max period: %016Lx\n", x86_pmu.max_period);
	pr_info("... fixed-purpose events: %d\n", x86_pmu.num_events_fixed);
	pr_info("... event mask: %016Lx\n", perf_event_mask);
}

static inline void x86_pmu_read(struct perf_event *event)
{
	x86_perf_event_update(event, &event->hw, event->hw.idx);
}

static const struct pmu pmu = {
	.enable		= x86_pmu_enable,
	.disable	= x86_pmu_disable,
	.read		= x86_pmu_read,
	.unthrottle	= x86_pmu_unthrottle,
};

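/*
 * Group validation: dry-run the scheduling of a prospective group on a
 * zeroed, fake cpu_hw_events to check that all members can be put on
 * counters at the same time before the group is accepted.
 */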
static int
validate_event(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct hw_perf_event fake_event = event->hw;

	if (event->pmu && event->pmu != &pmu)
		return 0;

	return x86_schedule_event(cpuc, &fake_event) >= 0;
}

static int validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct cpu_hw_events fake_pmu;

	memset(&fake_pmu, 0, sizeof(fake_pmu));

	if (!validate_event(&fake_pmu, leader))
		return -ENOSPC;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(&fake_pmu, sibling))
			return -ENOSPC;
	}

	if (!validate_event(&fake_pmu, event))
		return -ENOSPC;

	return 0;
}

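/*
 * Create an x86 hardware event: initialize the hardware-specific state,
 * validate the group it belongs to, and hand back the pmu ops on success
 * (or an ERR_PTR after tearing the event down again).
 */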
const struct pmu *hw_perf_event_init(struct perf_event *event)
{
	int err;

	err = __hw_perf_event_init(event);
	if (!err) {
		if (event->group_leader != event)
			err = validate_group(event);
	}
	if (err) {
		if (event->destroy)
			event->destroy(event);
		return ERR_PTR(err);
	}

	return &pmu;
}

/*
 * callchain support
 */

static inline
void callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
	if (entry->nr < PERF_MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;
}

static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);
static DEFINE_PER_CPU(int, in_ignored_frame);

static void
backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
	/* Ignore warnings */
}

static void backtrace_warning(void *data, char *msg)
{
	/* Ignore warnings */
}

static int backtrace_stack(void *data, char *name)
{
	per_cpu(in_ignored_frame, smp_processor_id()) =
		x86_is_stack_id(NMI_STACK, name) ||
		x86_is_stack_id(DEBUG_STACK, name);

	return 0;
}

static void backtrace_address(void *data, unsigned long addr, int reliable)
{
	struct perf_callchain_entry *entry = data;

	if (per_cpu(in_ignored_frame, smp_processor_id()))
		return;

	if (reliable)
		callchain_store(entry, addr);
}

static const struct stacktrace_ops backtrace_ops = {
	.warning		= backtrace_warning,
	.warning_symbol		= backtrace_warning_symbol,
	.stack			= backtrace_stack,
	.address		= backtrace_address,
	.walk_stack		= print_context_stack_bp,
};

#include "../dumpstack.h"

static void
perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	callchain_store(entry, PERF_CONTEXT_KERNEL);
	callchain_store(entry, regs->ip);

	dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
}

/*
 * best effort, GUP based copy_from_user() that assumes IRQ or NMI context
 */
static unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
{
	unsigned long offset, addr = (unsigned long)from;
	int type = in_nmi() ? KM_NMI : KM_IRQ0;
	unsigned long size, len = 0;
	struct page *page;
	void *map;
	int ret;

	do {
		ret = __get_user_pages_fast(addr, 1, 0, &page);
		if (!ret)
			break;

		offset = addr & (PAGE_SIZE - 1);
		size = min(PAGE_SIZE - offset, n - len);

		map = kmap_atomic(page, type);
		memcpy(to, map+offset, size);
		kunmap_atomic(map, type);
		put_page(page);

		len += size;
		to += size;
		addr += size;

	} while (len < n);

	return len;
}

static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
{
	unsigned long bytes;

	bytes = copy_from_user_nmi(frame, fp, sizeof(*frame));

	return bytes == sizeof(*frame);
}

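/*
 * Walk the user stack by chasing saved frame pointers, copying each frame
 * in with the NMI-safe helper above; stop on a copy failure, a frame
 * pointer below the current stack pointer, or once the entry is full.
 */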
static void
perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	struct stack_frame frame;
	const void __user *fp;

	if (!user_mode(regs))
		regs = task_pt_regs(current);

	fp = (void __user *)regs->bp;

	callchain_store(entry, PERF_CONTEXT_USER);
	callchain_store(entry, regs->ip);

	while (entry->nr < PERF_MAX_STACK_DEPTH) {
		frame.next_frame = NULL;
		frame.return_address = 0;

		if (!copy_stack_frame(fp, &frame))
			break;

		if ((unsigned long)fp < regs->sp)
			break;

		callchain_store(entry, frame.return_address);
		fp = frame.next_frame;
	}
}

static void
perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	int is_user;

	if (!regs)
		return;

	is_user = user_mode(regs);

	if (!current || current->pid == 0)
		return;

	if (is_user && current->state != TASK_RUNNING)
		return;

	if (!is_user)
		perf_callchain_kernel(regs, entry);

	if (current->mm)
		perf_callchain_user(regs, entry);
}

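/*
 * Entry point used by the generic perf code: pick the per-CPU scratch
 * entry matching the current context (NMI vs. IRQ) so the two cannot
 * clobber each other, then fill it in.
 */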
struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
	struct perf_callchain_entry *entry;

	if (in_nmi())
		entry = &__get_cpu_var(pmc_nmi_entry);
	else
		entry = &__get_cpu_var(pmc_irq_entry);

	entry->nr = 0;

	perf_do_callchain(regs, entry);

	return entry;
}

void hw_perf_event_setup_online(int cpu)
{
	init_debug_store_on_cpu(cpu);
}