perf_event.c

/*
 * Performance events x86 architecture code
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 *  Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/device.h>

#include <asm/apic.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>
#include <asm/smp.h>
#include <asm/alternative.h>
#include <asm/timer.h>
#include <asm/desc.h>
#include <asm/ldt.h>

#include "perf_event.h"

struct x86_pmu x86_pmu __read_mostly;

DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
        .enabled = 1,
};

u64 __read_mostly hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX];
u64 __read_mostly hw_cache_extra_regs
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX];

/*
 * Propagate event elapsed time into the generic event.
 * Can only be executed on the CPU where the event is active.
 * Returns the delta events processed.
 */
u64 x86_perf_event_update(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        int shift = 64 - x86_pmu.cntval_bits;
        u64 prev_raw_count, new_raw_count;
        int idx = hwc->idx;
        s64 delta;

        if (idx == INTEL_PMC_IDX_FIXED_BTS)
                return 0;

        /*
         * Careful: an NMI might modify the previous event value.
         *
         * Our tactic to handle this is to first atomically read and
         * exchange a new raw count - then add that new-prev delta
         * count to the generic event atomically:
         */
again:
        prev_raw_count = local64_read(&hwc->prev_count);
        rdpmcl(hwc->event_base_rdpmc, new_raw_count);

        if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                                        new_raw_count) != prev_raw_count)
                goto again;

        /*
         * Now we have the new raw value and have updated the prev
         * timestamp already. We can now calculate the elapsed delta
         * (event-)time and add that to the generic event.
         *
         * Careful, not all hw sign-extends above the physical width
         * of the count.
         */
        delta = (new_raw_count << shift) - (prev_raw_count << shift);
        delta >>= shift;

        local64_add(delta, &event->count);
        local64_sub(delta, &hwc->period_left);

        return new_raw_count;
}
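
/*
 * Illustrative note (editor's sketch, not part of the driver): with
 * 48-bit counters, shift = 64 - 48 = 16.  A counter that wraps from
 * 0xffffffffffff to 0x5 gives, in 64-bit arithmetic:
 *
 *      delta = (0x5 << 16) - (0xffffffffffff << 16);   // == 0x60000
 *      delta >>= 16;                                   // == 6
 *
 * The shift pair sign-extends both 48-bit values, so the subtraction
 * stays correct across counter overflow regardless of whether the
 * hardware sign-extends reads above the physical counter width.
 */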
/*
 * Find and validate any extra registers to set up.
 */
static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
{
        struct hw_perf_event_extra *reg;
        struct extra_reg *er;

        reg = &event->hw.extra_reg;

        if (!x86_pmu.extra_regs)
                return 0;

        for (er = x86_pmu.extra_regs; er->msr; er++) {
                if (er->event != (config & er->config_mask))
                        continue;
                if (event->attr.config1 & ~er->valid_mask)
                        return -EINVAL;

                reg->idx = er->idx;
                reg->config = event->attr.config1;
                reg->reg = er->msr;
                break;
        }
        return 0;
}

static atomic_t active_events;
static DEFINE_MUTEX(pmc_reserve_mutex);

#ifdef CONFIG_X86_LOCAL_APIC

static bool reserve_pmc_hardware(void)
{
        int i;

        for (i = 0; i < x86_pmu.num_counters; i++) {
                if (!reserve_perfctr_nmi(x86_pmu_event_addr(i)))
                        goto perfctr_fail;
        }

        for (i = 0; i < x86_pmu.num_counters; i++) {
                if (!reserve_evntsel_nmi(x86_pmu_config_addr(i)))
                        goto eventsel_fail;
        }

        return true;

eventsel_fail:
        for (i--; i >= 0; i--)
                release_evntsel_nmi(x86_pmu_config_addr(i));

        i = x86_pmu.num_counters;

perfctr_fail:
        for (i--; i >= 0; i--)
                release_perfctr_nmi(x86_pmu_event_addr(i));

        return false;
}

static void release_pmc_hardware(void)
{
        int i;

        for (i = 0; i < x86_pmu.num_counters; i++) {
                release_perfctr_nmi(x86_pmu_event_addr(i));
                release_evntsel_nmi(x86_pmu_config_addr(i));
        }
}

#else

static bool reserve_pmc_hardware(void) { return true; }
static void release_pmc_hardware(void) {}

#endif

static bool check_hw_exists(void)
{
        u64 val, val_fail, val_new = ~0;
        int i, reg, reg_fail, ret = 0;
        int bios_fail = 0;

        /*
         * Check to see if the BIOS enabled any of the counters, if so
         * complain and bail.
         */
        for (i = 0; i < x86_pmu.num_counters; i++) {
                reg = x86_pmu_config_addr(i);
                ret = rdmsrl_safe(reg, &val);
                if (ret)
                        goto msr_fail;
                if (val & ARCH_PERFMON_EVENTSEL_ENABLE) {
                        bios_fail = 1;
                        val_fail = val;
                        reg_fail = reg;
                }
        }

        if (x86_pmu.num_counters_fixed) {
                reg = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
                ret = rdmsrl_safe(reg, &val);
                if (ret)
                        goto msr_fail;
                for (i = 0; i < x86_pmu.num_counters_fixed; i++) {
                        if (val & (0x03 << i*4)) {
                                bios_fail = 1;
                                val_fail = val;
                                reg_fail = reg;
                        }
                }
        }

        /*
         * Read the current value, change it and read it back to see if it
         * matches, this is needed to detect certain hardware emulators
         * (qemu/kvm) that don't trap on the MSR access and always return 0s.
         */
        reg = x86_pmu_event_addr(0);
        if (rdmsrl_safe(reg, &val))
                goto msr_fail;
        val ^= 0xffffUL;
        ret = wrmsrl_safe(reg, val);
        ret |= rdmsrl_safe(reg, &val_new);
        if (ret || val != val_new)
                goto msr_fail;

        /*
         * We still allow the PMU driver to operate:
         */
        if (bios_fail) {
                printk(KERN_CONT "Broken BIOS detected, complain to your hardware vendor.\n");
                printk(KERN_ERR FW_BUG "the BIOS has corrupted hw-PMU resources (MSR %x is %Lx)\n", reg_fail, val_fail);
        }

        return true;

msr_fail:
        printk(KERN_CONT "Broken PMU hardware detected, using software events only.\n");
        printk(KERN_ERR "Failed to access perfctr msr (MSR %x is %Lx)\n", reg, val_new);

        return false;
}

static void hw_perf_event_destroy(struct perf_event *event)
{
        if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
                release_pmc_hardware();
                release_ds_buffers();
                mutex_unlock(&pmc_reserve_mutex);
        }
}

static inline int x86_pmu_initialized(void)
{
        return x86_pmu.handle_irq != NULL;
}

static inline int
set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
{
        struct perf_event_attr *attr = &event->attr;
        unsigned int cache_type, cache_op, cache_result;
        u64 config, val;

        config = attr->config;

        cache_type = (config >> 0) & 0xff;
        if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
                return -EINVAL;

        cache_op = (config >> 8) & 0xff;
        if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
                return -EINVAL;

        cache_result = (config >> 16) & 0xff;
        if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
                return -EINVAL;

        val = hw_cache_event_ids[cache_type][cache_op][cache_result];

        if (val == 0)
                return -ENOENT;

        if (val == -1)
                return -EINVAL;

        hwc->config |= val;
        attr->config1 = hw_cache_extra_regs[cache_type][cache_op][cache_result];
        return x86_pmu_extra_regs(val, event);
}
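
/*
 * Illustrative note (editor's sketch): attr->config for
 * PERF_TYPE_HW_CACHE packs three byte-wide fields, decoded above as
 * (type | op << 8 | result << 16).  For example, an L1 data-cache
 * read-miss event would be encoded by userspace as:
 *
 *      config = PERF_COUNT_HW_CACHE_L1D |
 *               (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *               (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 */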
int x86_setup_perfctr(struct perf_event *event)
{
        struct perf_event_attr *attr = &event->attr;
        struct hw_perf_event *hwc = &event->hw;
        u64 config;

        if (!is_sampling_event(event)) {
                hwc->sample_period = x86_pmu.max_period;
                hwc->last_period = hwc->sample_period;
                local64_set(&hwc->period_left, hwc->sample_period);
        } else {
                /*
                 * If we have a PMU initialized but no APIC
                 * interrupts, we cannot sample hardware
                 * events (user-space has to fall back and
                 * sample via a hrtimer based software event):
                 */
                if (!x86_pmu.apic)
                        return -EOPNOTSUPP;
        }

        if (attr->type == PERF_TYPE_RAW)
                return x86_pmu_extra_regs(event->attr.config, event);

        if (attr->type == PERF_TYPE_HW_CACHE)
                return set_ext_hw_attr(hwc, event);

        if (attr->config >= x86_pmu.max_events)
                return -EINVAL;

        /*
         * The generic map:
         */
        config = x86_pmu.event_map(attr->config);

        if (config == 0)
                return -ENOENT;

        if (config == -1LL)
                return -EINVAL;

        /*
         * Branch tracing:
         */
        if (attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
            !attr->freq && hwc->sample_period == 1) {
                /* BTS is not supported by this architecture. */
                if (!x86_pmu.bts_active)
                        return -EOPNOTSUPP;

                /* BTS is currently only allowed for user-mode. */
                if (!attr->exclude_kernel)
                        return -EOPNOTSUPP;
        }

        hwc->config |= config;

        return 0;
}

/*
 * check that branch_sample_type is compatible with
 * settings needed for precise_ip > 1 which implies
 * using the LBR to capture ALL taken branches at the
 * priv levels of the measurement
 */
static inline int precise_br_compat(struct perf_event *event)
{
        u64 m = event->attr.branch_sample_type;
        u64 b = 0;

        /* must capture all branches */
        if (!(m & PERF_SAMPLE_BRANCH_ANY))
                return 0;

        m &= PERF_SAMPLE_BRANCH_KERNEL | PERF_SAMPLE_BRANCH_USER;

        if (!event->attr.exclude_user)
                b |= PERF_SAMPLE_BRANCH_USER;

        if (!event->attr.exclude_kernel)
                b |= PERF_SAMPLE_BRANCH_KERNEL;

        /*
         * ignore PERF_SAMPLE_BRANCH_HV, not supported on x86
         */

        return m == b;
}
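
/*
 * Illustrative note (editor's sketch): for an event with precise_ip > 1
 * and exclude_kernel set, a compatible branch_sample_type must include
 * PERF_SAMPLE_BRANCH_ANY and match the event's priv levels exactly,
 * e.g.:
 *
 *      attr.branch_sample_type = PERF_SAMPLE_BRANCH_ANY |
 *                                PERF_SAMPLE_BRANCH_USER;
 *
 * Anything narrower would starve the LBR-based IP-fixup logic of
 * branch records.
 */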
int x86_pmu_hw_config(struct perf_event *event)
{
        if (event->attr.precise_ip) {
                int precise = 0;

                /* Support for constant skid */
                if (x86_pmu.pebs_active && !x86_pmu.pebs_broken) {
                        precise++;

                        /* Support for IP fixup */
                        if (x86_pmu.lbr_nr)
                                precise++;
                }

                if (event->attr.precise_ip > precise)
                        return -EOPNOTSUPP;
                /*
                 * check that PEBS LBR correction does not conflict with
                 * whatever the user is asking with attr->branch_sample_type
                 */
                if (event->attr.precise_ip > 1 &&
                    x86_pmu.intel_cap.pebs_format < 2) {
                        u64 *br_type = &event->attr.branch_sample_type;

                        if (has_branch_stack(event)) {
                                if (!precise_br_compat(event))
                                        return -EOPNOTSUPP;

                                /* branch_sample_type is compatible */

                        } else {
                                /*
                                 * user did not specify branch_sample_type
                                 *
                                 * For PEBS fixups, we capture all
                                 * the branches at the priv level of the
                                 * event.
                                 */
                                *br_type = PERF_SAMPLE_BRANCH_ANY;

                                if (!event->attr.exclude_user)
                                        *br_type |= PERF_SAMPLE_BRANCH_USER;

                                if (!event->attr.exclude_kernel)
                                        *br_type |= PERF_SAMPLE_BRANCH_KERNEL;
                        }
                }
        }

        /*
         * Generate PMC IRQs:
         * (keep 'enabled' bit clear for now)
         */
        event->hw.config = ARCH_PERFMON_EVENTSEL_INT;

        /*
         * Count user and OS events unless requested not to
         */
        if (!event->attr.exclude_user)
                event->hw.config |= ARCH_PERFMON_EVENTSEL_USR;
        if (!event->attr.exclude_kernel)
                event->hw.config |= ARCH_PERFMON_EVENTSEL_OS;

        if (event->attr.type == PERF_TYPE_RAW)
                event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;

        return x86_setup_perfctr(event);
}

/*
 * Setup the hardware configuration for a given attr_type
 */
static int __x86_pmu_event_init(struct perf_event *event)
{
        int err;

        if (!x86_pmu_initialized())
                return -ENODEV;

        err = 0;
        if (!atomic_inc_not_zero(&active_events)) {
                mutex_lock(&pmc_reserve_mutex);
                if (atomic_read(&active_events) == 0) {
                        if (!reserve_pmc_hardware())
                                err = -EBUSY;
                        else
                                reserve_ds_buffers();
                }
                if (!err)
                        atomic_inc(&active_events);
                mutex_unlock(&pmc_reserve_mutex);
        }
        if (err)
                return err;

        event->destroy = hw_perf_event_destroy;

        event->hw.idx = -1;
        event->hw.last_cpu = -1;
        event->hw.last_tag = ~0ULL;

        /* mark unused */
        event->hw.extra_reg.idx = EXTRA_REG_NONE;
        event->hw.branch_reg.idx = EXTRA_REG_NONE;

        return x86_pmu.hw_config(event);
}

void x86_pmu_disable_all(void)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int idx;

        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                u64 val;

                if (!test_bit(idx, cpuc->active_mask))
                        continue;
                rdmsrl(x86_pmu_config_addr(idx), val);
                if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
                        continue;
                val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
                wrmsrl(x86_pmu_config_addr(idx), val);
        }
}

static void x86_pmu_disable(struct pmu *pmu)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

        if (!x86_pmu_initialized())
                return;

        if (!cpuc->enabled)
                return;

        cpuc->n_added = 0;
        cpuc->enabled = 0;
        barrier();

        x86_pmu.disable_all();
}

void x86_pmu_enable_all(int added)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int idx;

        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                struct hw_perf_event *hwc = &cpuc->events[idx]->hw;

                if (!test_bit(idx, cpuc->active_mask))
                        continue;

                __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
        }
}

static struct pmu pmu;

static inline int is_x86_event(struct perf_event *event)
{
        return event->pmu == &pmu;
}
/*
 * Event scheduler state:
 *
 * Assign events iterating over all events and counters, beginning
 * with events with least weights first. Keep the current iterator
 * state in struct sched_state.
 */
struct sched_state {
        int     weight;
        int     event;          /* event index */
        int     counter;        /* counter index */
        int     unassigned;     /* number of events to be assigned left */
        unsigned long used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
};

/* Total max is X86_PMC_IDX_MAX, but we are O(n!) limited */
#define SCHED_STATES_MAX        2

struct perf_sched {
        int                     max_weight;
        int                     max_events;
        struct perf_event       **events;
        struct sched_state      state;
        int                     saved_states;
        struct sched_state      saved[SCHED_STATES_MAX];
};

/*
 * Initialize iterator that runs through all events and counters.
 */
static void perf_sched_init(struct perf_sched *sched, struct perf_event **events,
                            int num, int wmin, int wmax)
{
        int idx;

        memset(sched, 0, sizeof(*sched));
        sched->max_events       = num;
        sched->max_weight       = wmax;
        sched->events           = events;

        for (idx = 0; idx < num; idx++) {
                if (events[idx]->hw.constraint->weight == wmin)
                        break;
        }

        sched->state.event      = idx;          /* start with min weight */
        sched->state.weight     = wmin;
        sched->state.unassigned = num;
}

static void perf_sched_save_state(struct perf_sched *sched)
{
        if (WARN_ON_ONCE(sched->saved_states >= SCHED_STATES_MAX))
                return;

        sched->saved[sched->saved_states] = sched->state;
        sched->saved_states++;
}

static bool perf_sched_restore_state(struct perf_sched *sched)
{
        if (!sched->saved_states)
                return false;

        sched->saved_states--;
        sched->state = sched->saved[sched->saved_states];

        /* continue with next counter: */
        clear_bit(sched->state.counter++, sched->state.used);

        return true;
}

/*
 * Select a counter for the current event to schedule. Return true on
 * success.
 */
static bool __perf_sched_find_counter(struct perf_sched *sched)
{
        struct event_constraint *c;
        int idx;

        if (!sched->state.unassigned)
                return false;

        if (sched->state.event >= sched->max_events)
                return false;

        c = sched->events[sched->state.event]->hw.constraint;
        /* Prefer fixed purpose counters */
        if (c->idxmsk64 & (~0ULL << INTEL_PMC_IDX_FIXED)) {
                idx = INTEL_PMC_IDX_FIXED;
                for_each_set_bit_from(idx, c->idxmsk, X86_PMC_IDX_MAX) {
                        if (!__test_and_set_bit(idx, sched->state.used))
                                goto done;
                }
        }
        /* Grab the first unused counter starting with idx */
        idx = sched->state.counter;
        for_each_set_bit_from(idx, c->idxmsk, INTEL_PMC_IDX_FIXED) {
                if (!__test_and_set_bit(idx, sched->state.used))
                        goto done;
        }

        return false;

done:
        sched->state.counter = idx;

        if (c->overlap)
                perf_sched_save_state(sched);

        return true;
}

static bool perf_sched_find_counter(struct perf_sched *sched)
{
        while (!__perf_sched_find_counter(sched)) {
                if (!perf_sched_restore_state(sched))
                        return false;
        }

        return true;
}

/*
 * Go through all unassigned events and find the next one to schedule.
 * Take events with the least weight first. Return true on success.
 */
static bool perf_sched_next_event(struct perf_sched *sched)
{
        struct event_constraint *c;

        if (!sched->state.unassigned || !--sched->state.unassigned)
                return false;

        do {
                /* next event */
                sched->state.event++;
                if (sched->state.event >= sched->max_events) {
                        /* next weight */
                        sched->state.event = 0;
                        sched->state.weight++;
                        if (sched->state.weight > sched->max_weight)
                                return false;
                }
                c = sched->events[sched->state.event]->hw.constraint;
        } while (c->weight != sched->state.weight);

        sched->state.counter = 0;       /* start with first counter */

        return true;
}

/*
 * Assign a counter for each event.
 */
int perf_assign_events(struct perf_event **events, int n,
                        int wmin, int wmax, int *assign)
{
        struct perf_sched sched;

        perf_sched_init(&sched, events, n, wmin, wmax);

        do {
                if (!perf_sched_find_counter(&sched))
                        break;  /* failed */
                if (assign)
                        assign[sched.state.event] = sched.state.counter;
        } while (perf_sched_next_event(&sched));

        return sched.state.unassigned;
}
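
/*
 * Illustrative note (editor's sketch): perf_assign_events() returns the
 * number of events left unassigned, so 0 means a complete assignment.
 * A hypothetical caller with three collected events might do:
 *
 *      int assign[X86_PMC_IDX_MAX];
 *
 *      if (perf_assign_events(event_list, 3, wmin, wmax, assign))
 *              return -EINVAL; // no valid counter assignment exists
 *
 * Events are visited in order of increasing constraint weight (fewest
 * usable counters first), with limited backtracking for overlapping
 * constraints via perf_sched_save_state()/perf_sched_restore_state().
 */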
int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
{
        struct event_constraint *c;
        unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
        struct perf_event *e;
        int i, wmin, wmax, num = 0;
        struct hw_perf_event *hwc;

        bitmap_zero(used_mask, X86_PMC_IDX_MAX);

        for (i = 0, wmin = X86_PMC_IDX_MAX, wmax = 0; i < n; i++) {
                hwc = &cpuc->event_list[i]->hw;
                c = x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
                hwc->constraint = c;

                wmin = min(wmin, c->weight);
                wmax = max(wmax, c->weight);
        }

        /*
         * fastpath, try to reuse previous register
         */
        for (i = 0; i < n; i++) {
                hwc = &cpuc->event_list[i]->hw;
                c = hwc->constraint;

                /* never assigned */
                if (hwc->idx == -1)
                        break;

                /* constraint still honored */
                if (!test_bit(hwc->idx, c->idxmsk))
                        break;

                /* not already used */
                if (test_bit(hwc->idx, used_mask))
                        break;

                __set_bit(hwc->idx, used_mask);
                if (assign)
                        assign[i] = hwc->idx;
        }

        /* slow path */
        if (i != n)
                num = perf_assign_events(cpuc->event_list, n, wmin,
                                         wmax, assign);

        /*
         * Mark the event as committed, so we do not put_constraint()
         * in case new events are added and fail scheduling.
         */
        if (!num && assign) {
                for (i = 0; i < n; i++) {
                        e = cpuc->event_list[i];
                        e->hw.flags |= PERF_X86_EVENT_COMMITTED;
                }
        }
        /*
         * scheduling failed or is just a simulation,
         * free resources if necessary
         */
        if (!assign || num) {
                for (i = 0; i < n; i++) {
                        e = cpuc->event_list[i];
                        /*
                         * do not put_constraint() on committed events,
                         * because they are good to go
                         */
                        if ((e->hw.flags & PERF_X86_EVENT_COMMITTED))
                                continue;

                        if (x86_pmu.put_event_constraints)
                                x86_pmu.put_event_constraints(cpuc, e);
                }
        }
        return num ? -EINVAL : 0;
}

/*
 * dogrp: true if must collect sibling events (group)
 * returns total number of events and error code
 */
static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
{
        struct perf_event *event;
        int n, max_count;

        max_count = x86_pmu.num_counters + x86_pmu.num_counters_fixed;

        /* current number of events already accepted */
        n = cpuc->n_events;

        if (is_x86_event(leader)) {
                if (n >= max_count)
                        return -EINVAL;
                cpuc->event_list[n] = leader;
                n++;
        }
        if (!dogrp)
                return n;

        list_for_each_entry(event, &leader->sibling_list, group_entry) {
                if (!is_x86_event(event) ||
                    event->state <= PERF_EVENT_STATE_OFF)
                        continue;

                if (n >= max_count)
                        return -EINVAL;

                cpuc->event_list[n] = event;
                n++;
        }
        return n;
}

static inline void x86_assign_hw_event(struct perf_event *event,
                                struct cpu_hw_events *cpuc, int i)
{
        struct hw_perf_event *hwc = &event->hw;

        hwc->idx = cpuc->assign[i];
        hwc->last_cpu = smp_processor_id();
        hwc->last_tag = ++cpuc->tags[i];

        if (hwc->idx == INTEL_PMC_IDX_FIXED_BTS) {
                hwc->config_base = 0;
                hwc->event_base = 0;
        } else if (hwc->idx >= INTEL_PMC_IDX_FIXED) {
                hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
                hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + (hwc->idx - INTEL_PMC_IDX_FIXED);
                hwc->event_base_rdpmc = (hwc->idx - INTEL_PMC_IDX_FIXED) | 1<<30;
        } else {
                hwc->config_base = x86_pmu_config_addr(hwc->idx);
                hwc->event_base = x86_pmu_event_addr(hwc->idx);
                hwc->event_base_rdpmc = x86_pmu_rdpmc_index(hwc->idx);
        }
}
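
/*
 * Illustrative note (editor's sketch): on Intel hardware, RDPMC selects
 * fixed-function counters by setting bit 30 of ECX.  So fixed counter 1
 * (unhalted core cycles) is read with:
 *
 *      event_base_rdpmc = 1 | (1 << 30);       // ECX = 0x40000001
 *
 * while general-purpose counter N is read with ECX = N.
 */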
static inline int match_prev_assignment(struct hw_perf_event *hwc,
                                        struct cpu_hw_events *cpuc,
                                        int i)
{
        return hwc->idx == cpuc->assign[i] &&
                hwc->last_cpu == smp_processor_id() &&
                hwc->last_tag == cpuc->tags[i];
}

static void x86_pmu_start(struct perf_event *event, int flags);

static void x86_pmu_enable(struct pmu *pmu)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct perf_event *event;
        struct hw_perf_event *hwc;
        int i, added = cpuc->n_added;

        if (!x86_pmu_initialized())
                return;

        if (cpuc->enabled)
                return;

        if (cpuc->n_added) {
                int n_running = cpuc->n_events - cpuc->n_added;
                /*
                 * apply assignment obtained either from
                 * hw_perf_group_sched_in() or x86_pmu_enable()
                 *
                 * step1: save events moving to new counters
                 * step2: reprogram moved events into new counters
                 */
                for (i = 0; i < n_running; i++) {
                        event = cpuc->event_list[i];
                        hwc = &event->hw;

                        /*
                         * we can avoid reprogramming counter if:
                         * - assigned same counter as last time
                         * - running on same CPU as last time
                         * - no other event has used the counter since
                         */
                        if (hwc->idx == -1 ||
                            match_prev_assignment(hwc, cpuc, i))
                                continue;

                        /*
                         * Ensure we don't accidentally enable a stopped
                         * counter simply because we rescheduled.
                         */
                        if (hwc->state & PERF_HES_STOPPED)
                                hwc->state |= PERF_HES_ARCH;

                        x86_pmu_stop(event, PERF_EF_UPDATE);
                }

                for (i = 0; i < cpuc->n_events; i++) {
                        event = cpuc->event_list[i];
                        hwc = &event->hw;

                        if (!match_prev_assignment(hwc, cpuc, i))
                                x86_assign_hw_event(event, cpuc, i);
                        else if (i < n_running)
                                continue;

                        if (hwc->state & PERF_HES_ARCH)
                                continue;

                        x86_pmu_start(event, PERF_EF_RELOAD);
                }
                cpuc->n_added = 0;
                perf_events_lapic_init();
        }

        cpuc->enabled = 1;
        barrier();

        x86_pmu.enable_all(added);
}

static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);

/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the event disabled in hw:
 */
int x86_perf_event_set_period(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        s64 left = local64_read(&hwc->period_left);
        s64 period = hwc->sample_period;
        int ret = 0, idx = hwc->idx;

        if (idx == INTEL_PMC_IDX_FIXED_BTS)
                return 0;

        /*
         * If we are way outside a reasonable range then just skip forward:
         */
        if (unlikely(left <= -period)) {
                left = period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }

        if (unlikely(left <= 0)) {
                left += period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }
        /*
         * Quirk: certain CPUs don't like it if just 1 hw_event is left:
         */
        if (unlikely(left < 2))
                left = 2;

        if (left > x86_pmu.max_period)
                left = x86_pmu.max_period;

        per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;

        /*
         * The hw event starts counting from this event offset,
         * mark it to be able to extract future deltas:
         */
        local64_set(&hwc->prev_count, (u64)-left);

        wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);

        /*
         * Due to erratum on certain CPUs we need
         * a second write to be sure the register
         * is updated properly
         */
        if (x86_pmu.perfctr_second_write) {
                wrmsrl(hwc->event_base,
                        (u64)(-left) & x86_pmu.cntval_mask);
        }

        perf_event_update_userpage(event);

        return ret;
}
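
/*
 * Illustrative note (editor's sketch): the counter is programmed with
 * the two's complement of the remaining period so that it overflows
 * (and raises the PMI) after exactly 'left' increments.  With 48-bit
 * counters and left = 100000, the write above amounts to:
 *
 *      wrmsrl(hwc->event_base, (u64)-100000 & 0xffffffffffffULL);
 *      // counter starts 100000 increments below overflow
 */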
void x86_pmu_enable_event(struct perf_event *event)
{
        if (__this_cpu_read(cpu_hw_events.enabled))
                __x86_pmu_enable_event(&event->hw,
                                       ARCH_PERFMON_EVENTSEL_ENABLE);
}

/*
 * Add a single event to the PMU.
 *
 * The event is added to the group of enabled events
 * but only if it can be scheduled with existing events.
 */
static int x86_pmu_add(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc;
        int assign[X86_PMC_IDX_MAX];
        int n, n0, ret;

        hwc = &event->hw;

        perf_pmu_disable(event->pmu);
        n0 = cpuc->n_events;
        ret = n = collect_events(cpuc, event, false);
        if (ret < 0)
                goto out;

        hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
        if (!(flags & PERF_EF_START))
                hwc->state |= PERF_HES_ARCH;

        /*
         * If group events scheduling transaction was started,
         * skip the schedulability test here, it will be performed
         * at commit time (->commit_txn) as a whole
         */
        if (cpuc->group_flag & PERF_EVENT_TXN)
                goto done_collect;

        ret = x86_pmu.schedule_events(cpuc, n, assign);
        if (ret)
                goto out;
        /*
         * copy new assignment, now we know it is possible
         * will be used by hw_perf_enable()
         */
        memcpy(cpuc->assign, assign, n*sizeof(int));

done_collect:
        cpuc->n_events = n;
        cpuc->n_added += n - n0;
        cpuc->n_txn += n - n0;

        ret = 0;
out:
        perf_pmu_enable(event->pmu);
        return ret;
}

static void x86_pmu_start(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int idx = event->hw.idx;

        if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
                return;

        if (WARN_ON_ONCE(idx == -1))
                return;

        if (flags & PERF_EF_RELOAD) {
                WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
                x86_perf_event_set_period(event);
        }

        event->hw.state = 0;

        cpuc->events[idx] = event;
        __set_bit(idx, cpuc->active_mask);
        __set_bit(idx, cpuc->running);
        x86_pmu.enable(event);
        perf_event_update_userpage(event);
}
void perf_event_print_debug(void)
{
        u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
        u64 pebs;
        struct cpu_hw_events *cpuc;
        unsigned long flags;
        int cpu, idx;

        if (!x86_pmu.num_counters)
                return;

        local_irq_save(flags);

        cpu = smp_processor_id();
        cpuc = &per_cpu(cpu_hw_events, cpu);

        if (x86_pmu.version >= 2) {
                rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
                rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
                rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
                rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
                rdmsrl(MSR_IA32_PEBS_ENABLE, pebs);

                pr_info("\n");
                pr_info("CPU#%d: ctrl:     %016llx\n", cpu, ctrl);
                pr_info("CPU#%d: status:   %016llx\n", cpu, status);
                pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow);
                pr_info("CPU#%d: fixed:    %016llx\n", cpu, fixed);
                pr_info("CPU#%d: pebs:     %016llx\n", cpu, pebs);
        }
        pr_info("CPU#%d: active:   %016llx\n", cpu, *(u64 *)cpuc->active_mask);

        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                rdmsrl(x86_pmu_config_addr(idx), pmc_ctrl);
                rdmsrl(x86_pmu_event_addr(idx), pmc_count);

                prev_left = per_cpu(pmc_prev_left[idx], cpu);

                pr_info("CPU#%d:   gen-PMC%d ctrl:  %016llx\n",
                        cpu, idx, pmc_ctrl);
                pr_info("CPU#%d:   gen-PMC%d count: %016llx\n",
                        cpu, idx, pmc_count);
                pr_info("CPU#%d:   gen-PMC%d left:  %016llx\n",
                        cpu, idx, prev_left);
        }
        for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
                rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);

                pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
                        cpu, idx, pmc_count);
        }
        local_irq_restore(flags);
}
void x86_pmu_stop(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;

        if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
                x86_pmu.disable(event);
                cpuc->events[hwc->idx] = NULL;
                WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
                hwc->state |= PERF_HES_STOPPED;
        }

        if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
                /*
                 * Drain the remaining delta count out of an event
                 * that we are disabling:
                 */
                x86_perf_event_update(event);
                hwc->state |= PERF_HES_UPTODATE;
        }
}

static void x86_pmu_del(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int i;

        /*
         * event is descheduled
         */
        event->hw.flags &= ~PERF_X86_EVENT_COMMITTED;

        /*
         * If we're called during a txn, we don't need to do anything.
         * The events never got scheduled and ->cancel_txn will truncate
         * the event_list.
         */
        if (cpuc->group_flag & PERF_EVENT_TXN)
                return;

        x86_pmu_stop(event, PERF_EF_UPDATE);

        for (i = 0; i < cpuc->n_events; i++) {
                if (event == cpuc->event_list[i]) {

                        if (x86_pmu.put_event_constraints)
                                x86_pmu.put_event_constraints(cpuc, event);

                        while (++i < cpuc->n_events)
                                cpuc->event_list[i-1] = cpuc->event_list[i];

                        --cpuc->n_events;
                        break;
                }
        }
        perf_event_update_userpage(event);
}

int x86_pmu_handle_irq(struct pt_regs *regs)
{
        struct perf_sample_data data;
        struct cpu_hw_events *cpuc;
        struct perf_event *event;
        int idx, handled = 0;
        u64 val;

        cpuc = &__get_cpu_var(cpu_hw_events);

        /*
         * Some chipsets need to unmask the LVTPC in a particular spot
         * inside the nmi handler. As a result, the unmasking was pushed
         * into all the nmi handlers.
         *
         * This generic handler doesn't seem to have any issues where the
         * unmasking occurs so it was left at the top.
         */
        apic_write(APIC_LVTPC, APIC_DM_NMI);

        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                if (!test_bit(idx, cpuc->active_mask)) {
                        /*
                         * Though we deactivated the counter, some CPUs
                         * might still deliver spurious interrupts that
                         * are in flight. Catch them:
                         */
                        if (__test_and_clear_bit(idx, cpuc->running))
                                handled++;
                        continue;
                }

                event = cpuc->events[idx];

                val = x86_perf_event_update(event);
                if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
                        continue;

                /*
                 * event overflow
                 */
                handled++;
                perf_sample_data_init(&data, 0, event->hw.last_period);

                if (!x86_perf_event_set_period(event))
                        continue;

                if (perf_event_overflow(event, &data, regs))
                        x86_pmu_stop(event, 0);
        }

        if (handled)
                inc_irq_stat(apic_perf_irqs);

        return handled;
}
void perf_events_lapic_init(void)
{
        if (!x86_pmu.apic || !x86_pmu_initialized())
                return;

        /*
         * Always use NMI for PMU
         */
        apic_write(APIC_LVTPC, APIC_DM_NMI);
}

static int __kprobes
perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
        int ret;
        u64 start_clock;
        u64 finish_clock;

        if (!atomic_read(&active_events))
                return NMI_DONE;

        start_clock = local_clock();
        ret = x86_pmu.handle_irq(regs);
        finish_clock = local_clock();

        perf_sample_event_took(finish_clock - start_clock);

        return ret;
}

struct event_constraint emptyconstraint;
struct event_constraint unconstrained;

static int
x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
        unsigned int cpu = (long)hcpu;
        struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
        int ret = NOTIFY_OK;

        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_UP_PREPARE:
                cpuc->kfree_on_online = NULL;
                if (x86_pmu.cpu_prepare)
                        ret = x86_pmu.cpu_prepare(cpu);
                break;

        case CPU_STARTING:
                if (x86_pmu.attr_rdpmc)
                        set_in_cr4(X86_CR4_PCE);
                if (x86_pmu.cpu_starting)
                        x86_pmu.cpu_starting(cpu);
                break;

        case CPU_ONLINE:
                kfree(cpuc->kfree_on_online);
                break;

        case CPU_DYING:
                if (x86_pmu.cpu_dying)
                        x86_pmu.cpu_dying(cpu);
                break;

        case CPU_UP_CANCELED:
        case CPU_DEAD:
                if (x86_pmu.cpu_dead)
                        x86_pmu.cpu_dead(cpu);
                break;

        default:
                break;
        }

        return ret;
}

static void __init pmu_check_apic(void)
{
        if (cpu_has_apic)
                return;

        x86_pmu.apic = 0;
        pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
        pr_info("no hardware sampling interrupt available.\n");
}
static struct attribute_group x86_pmu_format_group = {
        .name = "format",
        .attrs = NULL,
};

/*
 * Remove all undefined events (x86_pmu.event_map(id) == 0)
 * out of events_attr attributes.
 */
static void __init filter_events(struct attribute **attrs)
{
        struct device_attribute *d;
        struct perf_pmu_events_attr *pmu_attr;
        int i, j;

        for (i = 0; attrs[i]; i++) {
                d = (struct device_attribute *)attrs[i];
                pmu_attr = container_of(d, struct perf_pmu_events_attr, attr);
                /* str trumps id */
                if (pmu_attr->event_str)
                        continue;
                if (x86_pmu.event_map(i))
                        continue;

                for (j = i; attrs[j]; j++)
                        attrs[j] = attrs[j + 1];

                /* Check the shifted attr. */
                i--;
        }
}

/* Merge two pointer arrays */
static __init struct attribute **merge_attr(struct attribute **a, struct attribute **b)
{
        struct attribute **new;
        int j, i;

        for (j = 0; a[j]; j++)
                ;
        for (i = 0; b[i]; i++)
                j++;
        j++;

        new = kmalloc(sizeof(struct attribute *) * j, GFP_KERNEL);
        if (!new)
                return NULL;

        j = 0;
        for (i = 0; a[i]; i++)
                new[j++] = a[i];
        for (i = 0; b[i]; i++)
                new[j++] = b[i];
        new[j] = NULL;

        return new;
}

ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
                          char *page)
{
        struct perf_pmu_events_attr *pmu_attr = \
                container_of(attr, struct perf_pmu_events_attr, attr);
        u64 config = x86_pmu.event_map(pmu_attr->id);

        /* string trumps id */
        if (pmu_attr->event_str)
                return sprintf(page, "%s", pmu_attr->event_str);

        return x86_pmu.events_sysfs_show(page, config);
}
EVENT_ATTR(cpu-cycles,                  CPU_CYCLES              );
EVENT_ATTR(instructions,                INSTRUCTIONS            );
EVENT_ATTR(cache-references,            CACHE_REFERENCES        );
EVENT_ATTR(cache-misses,                CACHE_MISSES            );
EVENT_ATTR(branch-instructions,         BRANCH_INSTRUCTIONS     );
EVENT_ATTR(branch-misses,               BRANCH_MISSES           );
EVENT_ATTR(bus-cycles,                  BUS_CYCLES              );
EVENT_ATTR(stalled-cycles-frontend,     STALLED_CYCLES_FRONTEND );
EVENT_ATTR(stalled-cycles-backend,      STALLED_CYCLES_BACKEND  );
EVENT_ATTR(ref-cycles,                  REF_CPU_CYCLES          );

static struct attribute *empty_attrs;

static struct attribute *events_attr[] = {
        EVENT_PTR(CPU_CYCLES),
        EVENT_PTR(INSTRUCTIONS),
        EVENT_PTR(CACHE_REFERENCES),
        EVENT_PTR(CACHE_MISSES),
        EVENT_PTR(BRANCH_INSTRUCTIONS),
        EVENT_PTR(BRANCH_MISSES),
        EVENT_PTR(BUS_CYCLES),
        EVENT_PTR(STALLED_CYCLES_FRONTEND),
        EVENT_PTR(STALLED_CYCLES_BACKEND),
        EVENT_PTR(REF_CPU_CYCLES),
        NULL,
};

static struct attribute_group x86_pmu_events_group = {
        .name = "events",
        .attrs = events_attr,
};

ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event)
{
        u64 umask = (config & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
        u64 cmask = (config & ARCH_PERFMON_EVENTSEL_CMASK) >> 24;
        bool edge = (config & ARCH_PERFMON_EVENTSEL_EDGE);
        bool pc   = (config & ARCH_PERFMON_EVENTSEL_PIN_CONTROL);
        bool any  = (config & ARCH_PERFMON_EVENTSEL_ANY);
        bool inv  = (config & ARCH_PERFMON_EVENTSEL_INV);
        ssize_t ret;

        /*
         * We have whole page size to spend and just little data
         * to write, so we can safely use sprintf.
         */
        ret = sprintf(page, "event=0x%02llx", event);

        if (umask)
                ret += sprintf(page + ret, ",umask=0x%02llx", umask);

        if (edge)
                ret += sprintf(page + ret, ",edge");

        if (pc)
                ret += sprintf(page + ret, ",pc");

        if (any)
                ret += sprintf(page + ret, ",any");

        if (inv)
                ret += sprintf(page + ret, ",inv");

        if (cmask)
                ret += sprintf(page + ret, ",cmask=0x%02llx", cmask);

        ret += sprintf(page + ret, "\n");

        return ret;
}
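
/*
 * Illustrative note (editor's sketch): for config = 0x01c4 (event
 * select 0xc4, umask 0x01, e.g. BR_INST_RETIRED.CONDITIONAL on some
 * Intel cores) this renders the sysfs string:
 *
 *      event=0xc4,umask=0x01
 *
 * Only fields that are actually set are appended, so a plain cycles
 * event would render as just "event=0x3c".
 */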
static int __init init_hw_perf_events(void)
{
        struct x86_pmu_quirk *quirk;
        int err;

        pr_info("Performance Events: ");

        switch (boot_cpu_data.x86_vendor) {
        case X86_VENDOR_INTEL:
                err = intel_pmu_init();
                break;
        case X86_VENDOR_AMD:
                err = amd_pmu_init();
                break;
        default:
                err = -ENOTSUPP;
        }
        if (err != 0) {
                pr_cont("no PMU driver, software events only.\n");
                return 0;
        }

        pmu_check_apic();

        /* sanity check that the hardware exists or is emulated */
        if (!check_hw_exists())
                return 0;

        pr_cont("%s PMU driver.\n", x86_pmu.name);

        for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next)
                quirk->func();

        if (!x86_pmu.intel_ctrl)
                x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;

        perf_events_lapic_init();
        register_nmi_handler(NMI_LOCAL, perf_event_nmi_handler, 0, "PMI");

        unconstrained = (struct event_constraint)
                __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
                                   0, x86_pmu.num_counters, 0, 0);

        x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */
        x86_pmu_format_group.attrs = x86_pmu.format_attrs;

        if (x86_pmu.event_attrs)
                x86_pmu_events_group.attrs = x86_pmu.event_attrs;

        if (!x86_pmu.events_sysfs_show)
                x86_pmu_events_group.attrs = &empty_attrs;
        else
                filter_events(x86_pmu_events_group.attrs);

        if (x86_pmu.cpu_events) {
                struct attribute **tmp;

                tmp = merge_attr(x86_pmu_events_group.attrs, x86_pmu.cpu_events);
                if (!WARN_ON(!tmp))
                        x86_pmu_events_group.attrs = tmp;
        }

        pr_info("... version:                %d\n",     x86_pmu.version);
        pr_info("... bit width:              %d\n",     x86_pmu.cntval_bits);
        pr_info("... generic registers:      %d\n",     x86_pmu.num_counters);
        pr_info("... value mask:             %016Lx\n", x86_pmu.cntval_mask);
        pr_info("... max period:             %016Lx\n", x86_pmu.max_period);
        pr_info("... fixed-purpose events:   %d\n",     x86_pmu.num_counters_fixed);
        pr_info("... event mask:             %016Lx\n", x86_pmu.intel_ctrl);

        perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
        perf_cpu_notifier(x86_pmu_notifier);

        return 0;
}
early_initcall(init_hw_perf_events);
static inline void x86_pmu_read(struct perf_event *event)
{
        x86_perf_event_update(event);
}

/*
 * Start group events scheduling transaction
 * Set the flag to make pmu::enable() not perform the
 * schedulability test, it will be performed at commit time
 */
static void x86_pmu_start_txn(struct pmu *pmu)
{
        perf_pmu_disable(pmu);
        __this_cpu_or(cpu_hw_events.group_flag, PERF_EVENT_TXN);
        __this_cpu_write(cpu_hw_events.n_txn, 0);
}

/*
 * Stop group events scheduling transaction
 * Clear the flag and pmu::enable() will perform the
 * schedulability test.
 */
static void x86_pmu_cancel_txn(struct pmu *pmu)
{
        __this_cpu_and(cpu_hw_events.group_flag, ~PERF_EVENT_TXN);
        /*
         * Truncate the collected events.
         */
        __this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn));
        __this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn));
        perf_pmu_enable(pmu);
}

/*
 * Commit group events scheduling transaction
 * Perform the group schedulability test as a whole
 * Return 0 if success
 */
static int x86_pmu_commit_txn(struct pmu *pmu)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int assign[X86_PMC_IDX_MAX];
        int n, ret;

        n = cpuc->n_events;

        if (!x86_pmu_initialized())
                return -EAGAIN;

        ret = x86_pmu.schedule_events(cpuc, n, assign);
        if (ret)
                return ret;

        /*
         * copy new assignment, now we know it is possible
         * will be used by hw_perf_enable()
         */
        memcpy(cpuc->assign, assign, n*sizeof(int));

        cpuc->group_flag &= ~PERF_EVENT_TXN;
        perf_pmu_enable(pmu);
        return 0;
}
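
/*
 * Illustrative note (editor's sketch): the core perf layer drives this
 * transaction API roughly as follows when scheduling an event group:
 *
 *      pmu->start_txn(pmu);
 *      // pmu->add() for each group member; collection only, since
 *      // PERF_EVENT_TXN makes x86_pmu_add() skip the per-event
 *      // schedulability test
 *      if (!pmu->commit_txn(pmu))
 *              return 0;       // group scheduled as a whole
 *      pmu->cancel_txn(pmu);   // truncate the collected events
 */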
/*
 * a fake_cpuc is used to validate event groups. Due to
 * the extra reg logic, we need to also allocate a fake
 * per_core and per_cpu structure. Otherwise, group events
 * using extra reg may conflict without the kernel being
 * able to catch this when the last event gets added to
 * the group.
 */
static void free_fake_cpuc(struct cpu_hw_events *cpuc)
{
        kfree(cpuc->shared_regs);
        kfree(cpuc);
}

static struct cpu_hw_events *allocate_fake_cpuc(void)
{
        struct cpu_hw_events *cpuc;
        int cpu = raw_smp_processor_id();

        cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL);
        if (!cpuc)
                return ERR_PTR(-ENOMEM);

        /* only needed, if we have extra_regs */
        if (x86_pmu.extra_regs) {
                cpuc->shared_regs = allocate_shared_regs(cpu);
                if (!cpuc->shared_regs)
                        goto error;
        }
        cpuc->is_fake = 1;
        return cpuc;
error:
        free_fake_cpuc(cpuc);
        return ERR_PTR(-ENOMEM);
}

/*
 * validate that we can schedule this event
 */
static int validate_event(struct perf_event *event)
{
        struct cpu_hw_events *fake_cpuc;
        struct event_constraint *c;
        int ret = 0;

        fake_cpuc = allocate_fake_cpuc();
        if (IS_ERR(fake_cpuc))
                return PTR_ERR(fake_cpuc);

        c = x86_pmu.get_event_constraints(fake_cpuc, event);

        if (!c || !c->weight)
                ret = -EINVAL;

        if (x86_pmu.put_event_constraints)
                x86_pmu.put_event_constraints(fake_cpuc, event);

        free_fake_cpuc(fake_cpuc);

        return ret;
}

/*
 * validate a single event group
 *
 * validation includes:
 *      - check events are compatible with each other
 *      - events do not compete for the same counter
 *      - number of events <= number of counters
 *
 * validation ensures the group can be loaded onto the
 * PMU if it was the only group available.
 */
static int validate_group(struct perf_event *event)
{
        struct perf_event *leader = event->group_leader;
        struct cpu_hw_events *fake_cpuc;
        int ret = -EINVAL, n;

        fake_cpuc = allocate_fake_cpuc();
        if (IS_ERR(fake_cpuc))
                return PTR_ERR(fake_cpuc);
        /*
         * the event is not yet connected with its
         * siblings therefore we must first collect
         * existing siblings, then add the new event
         * before we can simulate the scheduling
         */
        n = collect_events(fake_cpuc, leader, true);
        if (n < 0)
                goto out;

        fake_cpuc->n_events = n;
        n = collect_events(fake_cpuc, event, false);
        if (n < 0)
                goto out;

        fake_cpuc->n_events = n;

        ret = x86_pmu.schedule_events(fake_cpuc, n, NULL);

out:
        free_fake_cpuc(fake_cpuc);
        return ret;
}
  1418. static int x86_pmu_event_init(struct perf_event *event)
  1419. {
  1420. struct pmu *tmp;
  1421. int err;
  1422. switch (event->attr.type) {
  1423. case PERF_TYPE_RAW:
  1424. case PERF_TYPE_HARDWARE:
  1425. case PERF_TYPE_HW_CACHE:
  1426. break;
  1427. default:
  1428. return -ENOENT;
  1429. }
  1430. err = __x86_pmu_event_init(event);
  1431. if (!err) {
  1432. /*
  1433. * we temporarily connect event to its pmu
  1434. * such that validate_group() can classify
  1435. * it as an x86 event using is_x86_event()
  1436. */
  1437. tmp = event->pmu;
  1438. event->pmu = &pmu;
  1439. if (event->group_leader != event)
  1440. err = validate_group(event);
  1441. else
  1442. err = validate_event(event);
  1443. event->pmu = tmp;
  1444. }
  1445. if (err) {
  1446. if (event->destroy)
  1447. event->destroy(event);
  1448. }
  1449. return err;
  1450. }
static int x86_pmu_event_idx(struct perf_event *event)
{
	int idx = event->hw.idx;

	if (!x86_pmu.attr_rdpmc)
		return 0;

	if (x86_pmu.num_counters_fixed && idx >= INTEL_PMC_IDX_FIXED) {
		idx -= INTEL_PMC_IDX_FIXED;
		idx |= 1 << 30;
	}

	return idx + 1;
}

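/*
 * The "rdpmc" sysfs attribute below toggles CR4.PCE on every online
 * CPU, globally allowing or forbidding RDPMC from user mode, e.g.
 * (path as typically exposed, as root):
 *
 *	# echo 0 > /sys/devices/cpu/rdpmc
 */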
static ssize_t get_attr_rdpmc(struct device *cdev,
			      struct device_attribute *attr,
			      char *buf)
{
	return snprintf(buf, 40, "%d\n", x86_pmu.attr_rdpmc);
}

static void change_rdpmc(void *info)
{
	bool enable = !!(unsigned long)info;

	if (enable)
		set_in_cr4(X86_CR4_PCE);
	else
		clear_in_cr4(X86_CR4_PCE);
}

static ssize_t set_attr_rdpmc(struct device *cdev,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	unsigned long val;
	ssize_t ret;

	ret = kstrtoul(buf, 0, &val);
	if (ret)
		return ret;

	if (!!val != !!x86_pmu.attr_rdpmc) {
		x86_pmu.attr_rdpmc = !!val;
		smp_call_function(change_rdpmc, (void *)val, 1);
	}

	return count;
}

static DEVICE_ATTR(rdpmc, S_IRUSR | S_IWUSR, get_attr_rdpmc, set_attr_rdpmc);

static struct attribute *x86_pmu_attrs[] = {
	&dev_attr_rdpmc.attr,
	NULL,
};

static struct attribute_group x86_pmu_attr_group = {
	.attrs = x86_pmu_attrs,
};

static const struct attribute_group *x86_pmu_attr_groups[] = {
	&x86_pmu_attr_group,
	&x86_pmu_format_group,
	&x86_pmu_events_group,
	NULL,
};

static void x86_pmu_flush_branch_stack(void)
{
	if (x86_pmu.flush_branch_stack)
		x86_pmu.flush_branch_stack();
}

void perf_check_microcode(void)
{
	if (x86_pmu.check_microcode)
		x86_pmu.check_microcode();
}
EXPORT_SYMBOL_GPL(perf_check_microcode);

static struct pmu pmu = {
	.pmu_enable		= x86_pmu_enable,
	.pmu_disable		= x86_pmu_disable,

	.attr_groups		= x86_pmu_attr_groups,

	.event_init		= x86_pmu_event_init,

	.add			= x86_pmu_add,
	.del			= x86_pmu_del,
	.start			= x86_pmu_start,
	.stop			= x86_pmu_stop,
	.read			= x86_pmu_read,

	.start_txn		= x86_pmu_start_txn,
	.cancel_txn		= x86_pmu_cancel_txn,
	.commit_txn		= x86_pmu_commit_txn,

	.event_idx		= x86_pmu_event_idx,
	.flush_branch_stack	= x86_pmu_flush_branch_stack,
};

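/*
 * With cap_user_time / cap_user_time_zero set below, userspace can
 * convert a raw TSC value to sched_clock() time without a syscall.
 * A simplified sketch of the conversion (real code must split the
 * multiplication to avoid 64-bit overflow, as documented with
 * struct perf_event_mmap_page):
 *
 *	u64 cyc   = rdtsc();
 *	u64 delta = (cyc * userpg->time_mult) >> userpg->time_shift;
 *	u64 time  = userpg->time_zero + delta;
 */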
void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
{
	userpg->cap_user_time = 0;
	userpg->cap_user_time_zero = 0;
	userpg->cap_user_rdpmc = x86_pmu.attr_rdpmc;
	userpg->pmc_width = x86_pmu.cntval_bits;

	if (!sched_clock_stable)
		return;

	userpg->cap_user_time = 1;
	userpg->time_mult = this_cpu_read(cyc2ns);
	userpg->time_shift = CYC2NS_SCALE_FACTOR;
	userpg->time_offset = this_cpu_read(cyc2ns_offset) - now;

	userpg->cap_user_time_zero = 1;
	userpg->time_zero = this_cpu_read(cyc2ns_offset);
}

/*
 * callchain support
 */

static int backtrace_stack(void *data, char *name)
{
	return 0;
}

static void backtrace_address(void *data, unsigned long addr, int reliable)
{
	struct perf_callchain_entry *entry = data;

	perf_callchain_store(entry, addr);
}

static const struct stacktrace_ops backtrace_ops = {
	.stack			= backtrace_stack,
	.address		= backtrace_address,
	.walk_stack		= print_context_stack_bp,
};

void
perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		/* TODO: We don't support guest os callchain now */
		return;
	}

	perf_callchain_store(entry, regs->ip);

	dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry);
}

static inline int
valid_user_frame(const void __user *fp, unsigned long size)
{
	return (__range_not_ok(fp, size, TASK_SIZE) == 0);
}

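/*
 * An x86 segment selector packs three fields; the shift and the TI
 * test below follow directly from this layout:
 *
 *	bits 0-1: RPL (requested privilege level)
 *	bit  2:   TI  (0 = GDT, 1 = LDT)
 *	bits 3+:  index into the descriptor table
 */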
static unsigned long get_segment_base(unsigned int segment)
{
	struct desc_struct *desc;
	int idx = segment >> 3;

	if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) {
		if (idx >= LDT_ENTRIES)
			return 0;

		if (idx >= current->active_mm->context.size)
			return 0;

		desc = current->active_mm->context.ldt;
	} else {
		if (idx >= GDT_ENTRIES)
			return 0;

		desc = __this_cpu_ptr(&gdt_page.gdt[0]);
	}

	return get_desc_base(desc + idx);
}

#ifdef CONFIG_COMPAT

#include <asm/compat.h>

static inline int
perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	/* 32-bit process in 64-bit kernel. */
	unsigned long ss_base, cs_base;
	struct stack_frame_ia32 frame;
	const void __user *fp;

	if (!test_thread_flag(TIF_IA32))
		return 0;

	cs_base = get_segment_base(regs->cs);
	ss_base = get_segment_base(regs->ss);

	fp = compat_ptr(ss_base + regs->bp);
	while (entry->nr < PERF_MAX_STACK_DEPTH) {
		unsigned long bytes;
		frame.next_frame     = 0;
		frame.return_address = 0;

		bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
		if (bytes != sizeof(frame))
			break;

		if (!valid_user_frame(fp, sizeof(frame)))
			break;

		perf_callchain_store(entry, cs_base + frame.return_address);
		fp = compat_ptr(ss_base + frame.next_frame);
	}
	return 1;
}
#else
static inline int
perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	return 0;
}
#endif

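/*
 * The walk below follows the classic frame-pointer chain produced by
 * code built without -fomit-frame-pointer; struct stack_frame (from
 * asm/stacktrace.h) mirrors what CALL plus a "push %bp; mov %sp,%bp"
 * prologue leaves on the stack:
 *
 *	struct stack_frame {
 *		struct stack_frame	*next_frame;	 // saved caller frame pointer
 *		unsigned long		return_address;	 // pushed by CALL
 *	};
 */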
void
perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
	struct stack_frame frame;
	const void __user *fp;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		/* TODO: We don't support guest os callchain now */
		return;
	}

	/*
	 * We don't know what to do with VM86 stacks... ignore them for now.
	 */
	if (regs->flags & (X86_VM_MASK | PERF_EFLAGS_VM))
		return;

	fp = (void __user *)regs->bp;

	perf_callchain_store(entry, regs->ip);

	if (!current->mm)
		return;

	if (perf_callchain_user32(regs, entry))
		return;

	while (entry->nr < PERF_MAX_STACK_DEPTH) {
		unsigned long bytes;
		frame.next_frame     = NULL;
		frame.return_address = 0;

		bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
		if (bytes != sizeof(frame))
			break;

		if (!valid_user_frame(fp, sizeof(frame)))
			break;

		perf_callchain_store(entry, frame.return_address);
		fp = frame.next_frame;
	}
}

/*
 * Deal with code segment offsets for the various execution modes:
 *
 *   VM86 - the good olde 16 bit days, where the linear address is
 *          20 bits and we use regs->ip + 0x10 * regs->cs.
 *
 *   IA32 - Where we need to look at GDT/LDT segment descriptor tables
 *          to figure out what the 32bit base address is.
 *
 *    X32 - has TIF_X32 set, but is running in x86_64 mode
 *
 * X86_64 - CS,DS,SS,ES are all zero based.
 */
static unsigned long code_segment_base(struct pt_regs *regs)
{
	/*
	 * If we are in VM86 mode, add the segment offset to convert to a
	 * linear address.
	 */
	if (regs->flags & X86_VM_MASK)
		return 0x10 * regs->cs;

	/*
	 * For IA32 we look at the GDT/LDT segment base to convert the
	 * effective IP to a linear address.
	 */
#ifdef CONFIG_X86_32
	if (user_mode(regs) && regs->cs != __USER_CS)
		return get_segment_base(regs->cs);
#else
	if (test_thread_flag(TIF_IA32)) {
		if (user_mode(regs) && regs->cs != __USER32_CS)
			return get_segment_base(regs->cs);
	}
#endif
	return 0;
}

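/*
 * For illustration, a hypothetical VM86-mode sample with cs = 0x1234
 * and ip = 0x0010 yields the linear address
 *
 *	0x10 * 0x1234 + 0x0010 = 0x12350
 *
 * which is what perf_instruction_pointer() below reports.
 */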
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
	if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
		return perf_guest_cbs->get_guest_ip();

	return regs->ip + code_segment_base(regs);
}

unsigned long perf_misc_flags(struct pt_regs *regs)
{
	int misc = 0;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		if (perf_guest_cbs->is_user_mode())
			misc |= PERF_RECORD_MISC_GUEST_USER;
		else
			misc |= PERF_RECORD_MISC_GUEST_KERNEL;
	} else {
		if (user_mode(regs))
			misc |= PERF_RECORD_MISC_USER;
		else
			misc |= PERF_RECORD_MISC_KERNEL;
	}

	if (regs->flags & PERF_EFLAGS_EXACT)
		misc |= PERF_RECORD_MISC_EXACT_IP;

	return misc;
}

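/*
 * Export the PMU geometry so other subsystems (notably KVM's virtual
 * PMU) can size themselves against the host. Note that fixed and
 * general purpose counters are both reported as cntval_bits wide.
 */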
void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
{
	cap->version		= x86_pmu.version;
	cap->num_counters_gp	= x86_pmu.num_counters;
	cap->num_counters_fixed	= x86_pmu.num_counters_fixed;
	cap->bit_width_gp	= x86_pmu.cntval_bits;
	cap->bit_width_fixed	= x86_pmu.cntval_bits;
	cap->events_mask	= (unsigned int)x86_pmu.events_maskl;
	cap->events_mask_len	= x86_pmu.events_mask_len;
}
EXPORT_SYMBOL_GPL(perf_get_x86_pmu_capability);