perf_event.c

  1. /*
  2. * Performance events x86 architecture code
  3. *
  4. * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
  5. * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
  6. * Copyright (C) 2009 Jaswinder Singh Rajput
  7. * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
  8. * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
  9. * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
  10. * Copyright (C) 2009 Google, Inc., Stephane Eranian
  11. *
  12. * For licensing details see kernel-base/COPYING
  13. */
  14. #include <linux/perf_event.h>
  15. #include <linux/capability.h>
  16. #include <linux/notifier.h>
  17. #include <linux/hardirq.h>
  18. #include <linux/kprobes.h>
  19. #include <linux/module.h>
  20. #include <linux/kdebug.h>
  21. #include <linux/sched.h>
  22. #include <linux/uaccess.h>
  23. #include <linux/slab.h>
  24. #include <linux/cpu.h>
  25. #include <linux/bitops.h>
  26. #include <linux/device.h>
  27. #include <asm/apic.h>
  28. #include <asm/stacktrace.h>
  29. #include <asm/nmi.h>
  30. #include <asm/smp.h>
  31. #include <asm/alternative.h>
  32. #include <asm/timer.h>
  33. #include "perf_event.h"
  34. #if 0
  35. #undef wrmsrl
  36. #define wrmsrl(msr, val) \
  37. do { \
  38. trace_printk("wrmsrl(%lx, %lx)\n", (unsigned long)(msr),\
  39. (unsigned long)(val)); \
  40. native_write_msr((msr), (u32)((u64)(val)), \
  41. (u32)((u64)(val) >> 32)); \
  42. } while (0)
  43. #endif
  44. struct x86_pmu x86_pmu __read_mostly;
  45. DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
  46. .enabled = 1,
  47. };
  48. u64 __read_mostly hw_cache_event_ids
  49. [PERF_COUNT_HW_CACHE_MAX]
  50. [PERF_COUNT_HW_CACHE_OP_MAX]
  51. [PERF_COUNT_HW_CACHE_RESULT_MAX];
  52. u64 __read_mostly hw_cache_extra_regs
  53. [PERF_COUNT_HW_CACHE_MAX]
  54. [PERF_COUNT_HW_CACHE_OP_MAX]
  55. [PERF_COUNT_HW_CACHE_RESULT_MAX];
  56. /*
  57. * Propagate event elapsed time into the generic event.
  58. * Can only be executed on the CPU where the event is active.
  59. * Returns the delta events processed.
  60. */
  61. u64 x86_perf_event_update(struct perf_event *event)
  62. {
  63. struct hw_perf_event *hwc = &event->hw;
  64. int shift = 64 - x86_pmu.cntval_bits;
  65. u64 prev_raw_count, new_raw_count;
  66. int idx = hwc->idx;
  67. s64 delta;
  68. if (idx == X86_PMC_IDX_FIXED_BTS)
  69. return 0;
  70. /*
  71. * Careful: an NMI might modify the previous event value.
  72. *
  73. * Our tactic to handle this is to first atomically read and
  74. * exchange a new raw count - then add that new-prev delta
  75. * count to the generic event atomically:
  76. */
  77. again:
  78. prev_raw_count = local64_read(&hwc->prev_count);
  79. rdmsrl(hwc->event_base, new_raw_count);
  80. if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
  81. new_raw_count) != prev_raw_count)
  82. goto again;
  83. /*
  84. * Now we have the new raw value and have updated the prev
  85. * timestamp already. We can now calculate the elapsed delta
  86. * (event-)time and add that to the generic event.
  87. *
  88. * Careful, not all hw sign-extends above the physical width
  89. * of the count.
  90. */
  91. delta = (new_raw_count << shift) - (prev_raw_count << shift);
  92. delta >>= shift;
  93. local64_add(delta, &event->count);
  94. local64_sub(delta, &hwc->period_left);
  95. return new_raw_count;
  96. }
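/*
 * Editor's sketch (not in the original file): with the common 48-bit
 * counters, cntval_bits == 48 and shift == 16.  Shifting both raw values
 * up by 16 and the signed difference back down sign-extends the hardware
 * width, so a counter that wrapped from 0xffffffffffff to 0x5 still
 * produces a small positive delta:
 *
 *	prev_raw_count = 0xffffffffffffULL;
 *	new_raw_count  = 0x000000000005ULL;
 *	delta = (new_raw_count << 16) - (prev_raw_count << 16);
 *	delta >>= 16;	// delta == 6, the six events that occurred
 */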
  97. /*
  98. * Find and validate any extra registers to set up.
  99. */
  100. static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
  101. {
  102. struct hw_perf_event_extra *reg;
  103. struct extra_reg *er;
  104. reg = &event->hw.extra_reg;
  105. if (!x86_pmu.extra_regs)
  106. return 0;
  107. for (er = x86_pmu.extra_regs; er->msr; er++) {
  108. if (er->event != (config & er->config_mask))
  109. continue;
  110. if (event->attr.config1 & ~er->valid_mask)
  111. return -EINVAL;
  112. reg->idx = er->idx;
  113. reg->config = event->attr.config1;
  114. reg->reg = er->msr;
  115. break;
  116. }
  117. return 0;
  118. }
  119. static atomic_t active_events;
  120. static DEFINE_MUTEX(pmc_reserve_mutex);
  121. #ifdef CONFIG_X86_LOCAL_APIC
  122. static bool reserve_pmc_hardware(void)
  123. {
  124. int i;
  125. for (i = 0; i < x86_pmu.num_counters; i++) {
  126. if (!reserve_perfctr_nmi(x86_pmu_event_addr(i)))
  127. goto perfctr_fail;
  128. }
  129. for (i = 0; i < x86_pmu.num_counters; i++) {
  130. if (!reserve_evntsel_nmi(x86_pmu_config_addr(i)))
  131. goto eventsel_fail;
  132. }
  133. return true;
  134. eventsel_fail:
  135. for (i--; i >= 0; i--)
  136. release_evntsel_nmi(x86_pmu_config_addr(i));
  137. i = x86_pmu.num_counters;
  138. perfctr_fail:
  139. for (i--; i >= 0; i--)
  140. release_perfctr_nmi(x86_pmu_event_addr(i));
  141. return false;
  142. }
  143. static void release_pmc_hardware(void)
  144. {
  145. int i;
  146. for (i = 0; i < x86_pmu.num_counters; i++) {
  147. release_perfctr_nmi(x86_pmu_event_addr(i));
  148. release_evntsel_nmi(x86_pmu_config_addr(i));
  149. }
  150. }
  151. #else
  152. static bool reserve_pmc_hardware(void) { return true; }
  153. static void release_pmc_hardware(void) {}
  154. #endif
  155. static bool check_hw_exists(void)
  156. {
  157. u64 val, val_new = 0;
  158. int i, reg, ret = 0;
  159. /*
  160. * Check to see if the BIOS enabled any of the counters, if so
  161. * complain and bail.
  162. */
  163. for (i = 0; i < x86_pmu.num_counters; i++) {
  164. reg = x86_pmu_config_addr(i);
  165. ret = rdmsrl_safe(reg, &val);
  166. if (ret)
  167. goto msr_fail;
  168. if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
  169. goto bios_fail;
  170. }
  171. if (x86_pmu.num_counters_fixed) {
  172. reg = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
  173. ret = rdmsrl_safe(reg, &val);
  174. if (ret)
  175. goto msr_fail;
  176. for (i = 0; i < x86_pmu.num_counters_fixed; i++) {
  177. if (val & (0x03 << i*4))
  178. goto bios_fail;
  179. }
  180. }
  181. /*
  182. * Now write a value and read it back to see if it matches,
  183. * this is needed to detect certain hardware emulators (qemu/kvm)
  184. * that don't trap on the MSR access and always return 0s.
  185. */
  186. val = 0xabcdUL;
  187. ret = checking_wrmsrl(x86_pmu_event_addr(0), val);
  188. ret |= rdmsrl_safe(x86_pmu_event_addr(0), &val_new);
  189. if (ret || val != val_new)
  190. goto msr_fail;
  191. return true;
  192. bios_fail:
  193. /*
  194. * We still allow the PMU driver to operate:
  195. */
  196. printk(KERN_CONT "Broken BIOS detected, complain to your hardware vendor.\n");
  197. printk(KERN_ERR FW_BUG "the BIOS has corrupted hw-PMU resources (MSR %x is %Lx)\n", reg, val);
  198. return true;
  199. msr_fail:
  200. printk(KERN_CONT "Broken PMU hardware detected, using software events only.\n");
  201. return false;
  202. }
  203. static void hw_perf_event_destroy(struct perf_event *event)
  204. {
  205. if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
  206. release_pmc_hardware();
  207. release_ds_buffers();
  208. mutex_unlock(&pmc_reserve_mutex);
  209. }
  210. }
  211. static inline int x86_pmu_initialized(void)
  212. {
  213. return x86_pmu.handle_irq != NULL;
  214. }
  215. static inline int
  216. set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
  217. {
  218. struct perf_event_attr *attr = &event->attr;
  219. unsigned int cache_type, cache_op, cache_result;
  220. u64 config, val;
  221. config = attr->config;
  222. cache_type = (config >> 0) & 0xff;
  223. if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
  224. return -EINVAL;
  225. cache_op = (config >> 8) & 0xff;
  226. if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
  227. return -EINVAL;
  228. cache_result = (config >> 16) & 0xff;
  229. if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
  230. return -EINVAL;
  231. val = hw_cache_event_ids[cache_type][cache_op][cache_result];
  232. if (val == 0)
  233. return -ENOENT;
  234. if (val == -1)
  235. return -EINVAL;
  236. hwc->config |= val;
  237. attr->config1 = hw_cache_extra_regs[cache_type][cache_op][cache_result];
  238. return x86_pmu_extra_regs(val, event);
  239. }
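/*
 * Sketch of the attr->config layout decoded above (example values chosen
 * by the editor, not taken from this file): byte 0 selects the cache,
 * byte 1 the operation, byte 2 the result, e.g. L1D read misses:
 */
#if 0
	attr.type   = PERF_TYPE_HW_CACHE;
	attr.config = PERF_COUNT_HW_CACHE_L1D |
		      (PERF_COUNT_HW_CACHE_OP_READ << 8) |
		      (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
#endif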
  240. int x86_setup_perfctr(struct perf_event *event)
  241. {
  242. struct perf_event_attr *attr = &event->attr;
  243. struct hw_perf_event *hwc = &event->hw;
  244. u64 config;
  245. if (!is_sampling_event(event)) {
  246. hwc->sample_period = x86_pmu.max_period;
  247. hwc->last_period = hwc->sample_period;
  248. local64_set(&hwc->period_left, hwc->sample_period);
  249. } else {
  250. /*
  251. * If we have a PMU initialized but no APIC
  252. * interrupts, we cannot sample hardware
  253. * events (user-space has to fall back and
  254. * sample via a hrtimer based software event):
  255. */
  256. if (!x86_pmu.apic)
  257. return -EOPNOTSUPP;
  258. }
  259. if (attr->type == PERF_TYPE_RAW)
  260. return x86_pmu_extra_regs(event->attr.config, event);
  261. if (attr->type == PERF_TYPE_HW_CACHE)
  262. return set_ext_hw_attr(hwc, event);
  263. if (attr->config >= x86_pmu.max_events)
  264. return -EINVAL;
  265. /*
  266. * The generic map:
  267. */
  268. config = x86_pmu.event_map(attr->config);
  269. if (config == 0)
  270. return -ENOENT;
  271. if (config == -1LL)
  272. return -EINVAL;
  273. /*
  274. * Branch tracing:
  275. */
  276. if (attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
  277. !attr->freq && hwc->sample_period == 1) {
  278. /* BTS is not supported by this architecture. */
  279. if (!x86_pmu.bts_active)
  280. return -EOPNOTSUPP;
  281. /* BTS is currently only allowed for user-mode. */
  282. if (!attr->exclude_kernel)
  283. return -EOPNOTSUPP;
  284. }
  285. hwc->config |= config;
  286. return 0;
  287. }
  288. /*
  289. * check that branch_sample_type is compatible with
  290. * settings needed for precise_ip > 1 which implies
  291. * using the LBR to capture ALL taken branches at the
  292. * priv levels of the measurement
  293. */
  294. static inline int precise_br_compat(struct perf_event *event)
  295. {
  296. u64 m = event->attr.branch_sample_type;
  297. u64 b = 0;
  298. /* must capture all branches */
  299. if (!(m & PERF_SAMPLE_BRANCH_ANY))
  300. return 0;
  301. m &= PERF_SAMPLE_BRANCH_KERNEL | PERF_SAMPLE_BRANCH_USER;
  302. if (!event->attr.exclude_user)
  303. b |= PERF_SAMPLE_BRANCH_USER;
  304. if (!event->attr.exclude_kernel)
  305. b |= PERF_SAMPLE_BRANCH_KERNEL;
  306. /*
  307. * ignore PERF_SAMPLE_BRANCH_HV, not supported on x86
  308. */
  309. return m == b;
  310. }
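/*
 * Illustration (editor-supplied example): an event counting user space
 * only (exclude_kernel set, exclude_user clear) passes the check above
 * only if the requested branch filter is exactly "all taken branches at
 * the user privilege level":
 */
#if 0
	attr.exclude_kernel     = 1;
	attr.branch_sample_type = PERF_SAMPLE_BRANCH_ANY |
				  PERF_SAMPLE_BRANCH_USER;
#endif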
  311. int x86_pmu_hw_config(struct perf_event *event)
  312. {
  313. if (event->attr.precise_ip) {
  314. int precise = 0;
  315. /* Support for constant skid */
  316. if (x86_pmu.pebs_active) {
  317. precise++;
  318. /* Support for IP fixup */
  319. if (x86_pmu.lbr_nr)
  320. precise++;
  321. }
  322. if (event->attr.precise_ip > precise)
  323. return -EOPNOTSUPP;
  324. /*
  325. * check that PEBS LBR correction does not conflict with
  326. * whatever the user is asking with attr->branch_sample_type
  327. */
  328. if (event->attr.precise_ip > 1) {
  329. u64 *br_type = &event->attr.branch_sample_type;
  330. if (has_branch_stack(event)) {
  331. if (!precise_br_compat(event))
  332. return -EOPNOTSUPP;
  333. /* branch_sample_type is compatible */
  334. } else {
  335. /*
  336. * user did not specify branch_sample_type
  337. *
  338. * For PEBS fixups, we capture all
  339. * the branches at the priv level of the
  340. * event.
  341. */
  342. *br_type = PERF_SAMPLE_BRANCH_ANY;
  343. if (!event->attr.exclude_user)
  344. *br_type |= PERF_SAMPLE_BRANCH_USER;
  345. if (!event->attr.exclude_kernel)
  346. *br_type |= PERF_SAMPLE_BRANCH_KERNEL;
  347. }
  348. }
  349. }
  350. /*
  351. * Generate PMC IRQs:
  352. * (keep 'enabled' bit clear for now)
  353. */
  354. event->hw.config = ARCH_PERFMON_EVENTSEL_INT;
  355. /*
  356. * Count user and OS events unless requested not to
  357. */
  358. if (!event->attr.exclude_user)
  359. event->hw.config |= ARCH_PERFMON_EVENTSEL_USR;
  360. if (!event->attr.exclude_kernel)
  361. event->hw.config |= ARCH_PERFMON_EVENTSEL_OS;
  362. if (event->attr.type == PERF_TYPE_RAW)
  363. event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;
  364. return x86_setup_perfctr(event);
  365. }
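/*
 * Sketch of a raw event as handled above (0x412e is an editor-supplied
 * example: umask 0x41, event 0x2e, i.e. LLC misses on Intel).  The user
 * bits within X86_RAW_EVENT_MASK are merged with the EVENTSEL_INT/USR/OS
 * bits set earlier in this function:
 */
#if 0
	attr.type   = PERF_TYPE_RAW;
	attr.config = 0x412e;
	/* hw.config becomes 0x412e | INT | USR | OS, ENABLE still clear */
#endif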
  366. /*
  367. * Setup the hardware configuration for a given attr_type
  368. */
  369. static int __x86_pmu_event_init(struct perf_event *event)
  370. {
  371. int err;
  372. if (!x86_pmu_initialized())
  373. return -ENODEV;
  374. err = 0;
  375. if (!atomic_inc_not_zero(&active_events)) {
  376. mutex_lock(&pmc_reserve_mutex);
  377. if (atomic_read(&active_events) == 0) {
  378. if (!reserve_pmc_hardware())
  379. err = -EBUSY;
  380. else
  381. reserve_ds_buffers();
  382. }
  383. if (!err)
  384. atomic_inc(&active_events);
  385. mutex_unlock(&pmc_reserve_mutex);
  386. }
  387. if (err)
  388. return err;
  389. event->destroy = hw_perf_event_destroy;
  390. event->hw.idx = -1;
  391. event->hw.last_cpu = -1;
  392. event->hw.last_tag = ~0ULL;
  393. /* mark unused */
  394. event->hw.extra_reg.idx = EXTRA_REG_NONE;
  395. event->hw.branch_reg.idx = EXTRA_REG_NONE;
  396. return x86_pmu.hw_config(event);
  397. }
  398. void x86_pmu_disable_all(void)
  399. {
  400. struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
  401. int idx;
  402. for (idx = 0; idx < x86_pmu.num_counters; idx++) {
  403. u64 val;
  404. if (!test_bit(idx, cpuc->active_mask))
  405. continue;
  406. rdmsrl(x86_pmu_config_addr(idx), val);
  407. if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
  408. continue;
  409. val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
  410. wrmsrl(x86_pmu_config_addr(idx), val);
  411. }
  412. }
  413. static void x86_pmu_disable(struct pmu *pmu)
  414. {
  415. struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
  416. if (!x86_pmu_initialized())
  417. return;
  418. if (!cpuc->enabled)
  419. return;
  420. cpuc->n_added = 0;
  421. cpuc->enabled = 0;
  422. barrier();
  423. x86_pmu.disable_all();
  424. }
  425. void x86_pmu_enable_all(int added)
  426. {
  427. struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
  428. int idx;
  429. for (idx = 0; idx < x86_pmu.num_counters; idx++) {
  430. struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
  431. if (!test_bit(idx, cpuc->active_mask))
  432. continue;
  433. __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
  434. }
  435. }
  436. static struct pmu pmu;
  437. static inline int is_x86_event(struct perf_event *event)
  438. {
  439. return event->pmu == &pmu;
  440. }
  441. /*
  442. * Event scheduler state:
  443. *
  444. * Assign events iterating over all events and counters, beginning
  445. * with events with least weights first. Keep the current iterator
  446. * state in struct sched_state.
  447. */
  448. struct sched_state {
  449. int weight;
  450. int event; /* event index */
  451. int counter; /* counter index */
  452. int unassigned; /* number of events to be assigned left */
  453. unsigned long used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
  454. };
  455. /* Total max is X86_PMC_IDX_MAX, but we are O(n!) limited */
  456. #define SCHED_STATES_MAX 2
  457. struct perf_sched {
  458. int max_weight;
  459. int max_events;
  460. struct event_constraint **constraints;
  461. struct sched_state state;
  462. int saved_states;
  463. struct sched_state saved[SCHED_STATES_MAX];
  464. };
  465. /*
  466. * Initialize the iterator that runs through all events and counters.
  467. */
  468. static void perf_sched_init(struct perf_sched *sched, struct event_constraint **c,
  469. int num, int wmin, int wmax)
  470. {
  471. int idx;
  472. memset(sched, 0, sizeof(*sched));
  473. sched->max_events = num;
  474. sched->max_weight = wmax;
  475. sched->constraints = c;
  476. for (idx = 0; idx < num; idx++) {
  477. if (c[idx]->weight == wmin)
  478. break;
  479. }
  480. sched->state.event = idx; /* start with min weight */
  481. sched->state.weight = wmin;
  482. sched->state.unassigned = num;
  483. }
  484. static void perf_sched_save_state(struct perf_sched *sched)
  485. {
  486. if (WARN_ON_ONCE(sched->saved_states >= SCHED_STATES_MAX))
  487. return;
  488. sched->saved[sched->saved_states] = sched->state;
  489. sched->saved_states++;
  490. }
  491. static bool perf_sched_restore_state(struct perf_sched *sched)
  492. {
  493. if (!sched->saved_states)
  494. return false;
  495. sched->saved_states--;
  496. sched->state = sched->saved[sched->saved_states];
  497. /* continue with next counter: */
  498. clear_bit(sched->state.counter++, sched->state.used);
  499. return true;
  500. }
  501. /*
  502. * Select a counter for the current event to schedule. Return true on
  503. * success.
  504. */
  505. static bool __perf_sched_find_counter(struct perf_sched *sched)
  506. {
  507. struct event_constraint *c;
  508. int idx;
  509. if (!sched->state.unassigned)
  510. return false;
  511. if (sched->state.event >= sched->max_events)
  512. return false;
  513. c = sched->constraints[sched->state.event];
  514. /* Prefer fixed purpose counters */
  515. if (x86_pmu.num_counters_fixed) {
  516. idx = X86_PMC_IDX_FIXED;
  517. for_each_set_bit_from(idx, c->idxmsk, X86_PMC_IDX_MAX) {
  518. if (!__test_and_set_bit(idx, sched->state.used))
  519. goto done;
  520. }
  521. }
  522. /* Grab the first unused counter starting with idx */
  523. idx = sched->state.counter;
  524. for_each_set_bit_from(idx, c->idxmsk, X86_PMC_IDX_FIXED) {
  525. if (!__test_and_set_bit(idx, sched->state.used))
  526. goto done;
  527. }
  528. return false;
  529. done:
  530. sched->state.counter = idx;
  531. if (c->overlap)
  532. perf_sched_save_state(sched);
  533. return true;
  534. }
  535. static bool perf_sched_find_counter(struct perf_sched *sched)
  536. {
  537. while (!__perf_sched_find_counter(sched)) {
  538. if (!perf_sched_restore_state(sched))
  539. return false;
  540. }
  541. return true;
  542. }
  543. /*
  544. * Go through all unassigned events and find the next one to schedule.
  545. * Take events with the least weight first. Return true on success.
  546. */
  547. static bool perf_sched_next_event(struct perf_sched *sched)
  548. {
  549. struct event_constraint *c;
  550. if (!sched->state.unassigned || !--sched->state.unassigned)
  551. return false;
  552. do {
  553. /* next event */
  554. sched->state.event++;
  555. if (sched->state.event >= sched->max_events) {
  556. /* next weight */
  557. sched->state.event = 0;
  558. sched->state.weight++;
  559. if (sched->state.weight > sched->max_weight)
  560. return false;
  561. }
  562. c = sched->constraints[sched->state.event];
  563. } while (c->weight != sched->state.weight);
  564. sched->state.counter = 0; /* start with first counter */
  565. return true;
  566. }
  567. /*
  568. * Assign a counter for each event.
  569. */
  570. static int perf_assign_events(struct event_constraint **constraints, int n,
  571. int wmin, int wmax, int *assign)
  572. {
  573. struct perf_sched sched;
  574. perf_sched_init(&sched, constraints, n, wmin, wmax);
  575. do {
  576. if (!perf_sched_find_counter(&sched))
  577. break; /* failed */
  578. if (assign)
  579. assign[sched.state.event] = sched.state.counter;
  580. } while (perf_sched_next_event(&sched));
  581. return sched.state.unassigned;
  582. }
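/*
 * Worked example (editor's illustration): three events, where A is
 * constrained to counter 0 (weight 1) and B and C may use counters 0-3
 * (weight 4).  The scheduler starts at wmin, so A is placed on counter 0
 * first and B and C then take the next free counters:
 *
 *	assign[A] == 0, assign[B] == 1, assign[C] == 2
 *
 * The return value is the number of events left unassigned, 0 here.
 */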
  583. int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
  584. {
  585. struct event_constraint *c, *constraints[X86_PMC_IDX_MAX];
  586. unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
  587. int i, wmin, wmax, num = 0;
  588. struct hw_perf_event *hwc;
  589. bitmap_zero(used_mask, X86_PMC_IDX_MAX);
  590. for (i = 0, wmin = X86_PMC_IDX_MAX, wmax = 0; i < n; i++) {
  591. c = x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
  592. constraints[i] = c;
  593. wmin = min(wmin, c->weight);
  594. wmax = max(wmax, c->weight);
  595. }
  596. /*
  597. * fastpath, try to reuse previous register
  598. */
  599. for (i = 0; i < n; i++) {
  600. hwc = &cpuc->event_list[i]->hw;
  601. c = constraints[i];
  602. /* never assigned */
  603. if (hwc->idx == -1)
  604. break;
  605. /* constraint still honored */
  606. if (!test_bit(hwc->idx, c->idxmsk))
  607. break;
  608. /* not already used */
  609. if (test_bit(hwc->idx, used_mask))
  610. break;
  611. __set_bit(hwc->idx, used_mask);
  612. if (assign)
  613. assign[i] = hwc->idx;
  614. }
  615. /* slow path */
  616. if (i != n)
  617. num = perf_assign_events(constraints, n, wmin, wmax, assign);
  618. /*
  619. * scheduling failed or is just a simulation,
  620. * free resources if necessary
  621. */
  622. if (!assign || num) {
  623. for (i = 0; i < n; i++) {
  624. if (x86_pmu.put_event_constraints)
  625. x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
  626. }
  627. }
  628. return num ? -EINVAL : 0;
  629. }
  630. /*
  631. * dogrp: true if we must collect sibling events (group)
  632. * returns total number of events and error code
  633. */
  634. static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
  635. {
  636. struct perf_event *event;
  637. int n, max_count;
  638. max_count = x86_pmu.num_counters + x86_pmu.num_counters_fixed;
  639. /* current number of events already accepted */
  640. n = cpuc->n_events;
  641. if (is_x86_event(leader)) {
  642. if (n >= max_count)
  643. return -EINVAL;
  644. cpuc->event_list[n] = leader;
  645. n++;
  646. }
  647. if (!dogrp)
  648. return n;
  649. list_for_each_entry(event, &leader->sibling_list, group_entry) {
  650. if (!is_x86_event(event) ||
  651. event->state <= PERF_EVENT_STATE_OFF)
  652. continue;
  653. if (n >= max_count)
  654. return -EINVAL;
  655. cpuc->event_list[n] = event;
  656. n++;
  657. }
  658. return n;
  659. }
  660. static inline void x86_assign_hw_event(struct perf_event *event,
  661. struct cpu_hw_events *cpuc, int i)
  662. {
  663. struct hw_perf_event *hwc = &event->hw;
  664. hwc->idx = cpuc->assign[i];
  665. hwc->last_cpu = smp_processor_id();
  666. hwc->last_tag = ++cpuc->tags[i];
  667. if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
  668. hwc->config_base = 0;
  669. hwc->event_base = 0;
  670. } else if (hwc->idx >= X86_PMC_IDX_FIXED) {
  671. hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
  672. hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + (hwc->idx - X86_PMC_IDX_FIXED);
  673. } else {
  674. hwc->config_base = x86_pmu_config_addr(hwc->idx);
  675. hwc->event_base = x86_pmu_event_addr(hwc->idx);
  676. }
  677. }
  678. static inline int match_prev_assignment(struct hw_perf_event *hwc,
  679. struct cpu_hw_events *cpuc,
  680. int i)
  681. {
  682. return hwc->idx == cpuc->assign[i] &&
  683. hwc->last_cpu == smp_processor_id() &&
  684. hwc->last_tag == cpuc->tags[i];
  685. }
  686. static void x86_pmu_start(struct perf_event *event, int flags);
  687. static void x86_pmu_enable(struct pmu *pmu)
  688. {
  689. struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
  690. struct perf_event *event;
  691. struct hw_perf_event *hwc;
  692. int i, added = cpuc->n_added;
  693. if (!x86_pmu_initialized())
  694. return;
  695. if (cpuc->enabled)
  696. return;
  697. if (cpuc->n_added) {
  698. int n_running = cpuc->n_events - cpuc->n_added;
  699. /*
  700. * apply assignment obtained either from
  701. * hw_perf_group_sched_in() or x86_pmu_enable()
  702. *
  703. * step1: save events moving to new counters
  704. * step2: reprogram moved events into new counters
  705. */
  706. for (i = 0; i < n_running; i++) {
  707. event = cpuc->event_list[i];
  708. hwc = &event->hw;
  709. /*
  710. * we can avoid reprogramming counter if:
  711. * - assigned same counter as last time
  712. * - running on same CPU as last time
  713. * - no other event has used the counter since
  714. */
  715. if (hwc->idx == -1 ||
  716. match_prev_assignment(hwc, cpuc, i))
  717. continue;
  718. /*
  719. * Ensure we don't accidentally enable a stopped
  720. * counter simply because we rescheduled.
  721. */
  722. if (hwc->state & PERF_HES_STOPPED)
  723. hwc->state |= PERF_HES_ARCH;
  724. x86_pmu_stop(event, PERF_EF_UPDATE);
  725. }
  726. for (i = 0; i < cpuc->n_events; i++) {
  727. event = cpuc->event_list[i];
  728. hwc = &event->hw;
  729. if (!match_prev_assignment(hwc, cpuc, i))
  730. x86_assign_hw_event(event, cpuc, i);
  731. else if (i < n_running)
  732. continue;
  733. if (hwc->state & PERF_HES_ARCH)
  734. continue;
  735. x86_pmu_start(event, PERF_EF_RELOAD);
  736. }
  737. cpuc->n_added = 0;
  738. perf_events_lapic_init();
  739. }
  740. cpuc->enabled = 1;
  741. barrier();
  742. x86_pmu.enable_all(added);
  743. }
  744. static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
  745. /*
  746. * Set the next IRQ period, based on the hwc->period_left value.
  747. * To be called with the event disabled in hw:
  748. */
  749. int x86_perf_event_set_period(struct perf_event *event)
  750. {
  751. struct hw_perf_event *hwc = &event->hw;
  752. s64 left = local64_read(&hwc->period_left);
  753. s64 period = hwc->sample_period;
  754. int ret = 0, idx = hwc->idx;
  755. if (idx == X86_PMC_IDX_FIXED_BTS)
  756. return 0;
  757. /*
  758. * If we are way outside a reasonable range then just skip forward:
  759. */
  760. if (unlikely(left <= -period)) {
  761. left = period;
  762. local64_set(&hwc->period_left, left);
  763. hwc->last_period = period;
  764. ret = 1;
  765. }
  766. if (unlikely(left <= 0)) {
  767. left += period;
  768. local64_set(&hwc->period_left, left);
  769. hwc->last_period = period;
  770. ret = 1;
  771. }
  772. /*
  773. * Quirk: certain CPUs don't like it if just 1 hw_event is left:
  774. */
  775. if (unlikely(left < 2))
  776. left = 2;
  777. if (left > x86_pmu.max_period)
  778. left = x86_pmu.max_period;
  779. per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
  780. /*
  781. * The hw event starts counting from this event offset,
  782. * mark it to be able to extract future deltas:
  783. */
  784. local64_set(&hwc->prev_count, (u64)-left);
  785. wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
  786. /*
  787. * Due to an erratum on certain CPUs we need
  788. * a second write to be sure the register
  789. * is updated properly
  790. */
  791. if (x86_pmu.perfctr_second_write) {
  792. wrmsrl(hwc->event_base,
  793. (u64)(-left) & x86_pmu.cntval_mask);
  794. }
  795. perf_event_update_userpage(event);
  796. return ret;
  797. }
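/*
 * Note on the (u64)-left write above (the numbers are an editor-supplied
 * example): with 48-bit counters and left == 100000 the MSR is programmed
 * with
 *
 *	(u64)(-100000) & x86_pmu.cntval_mask == 0xfffffffe7960
 *
 * so the counter overflows, and the PMI fires, after exactly 100000
 * increments.
 */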
  798. void x86_pmu_enable_event(struct perf_event *event)
  799. {
  800. if (__this_cpu_read(cpu_hw_events.enabled))
  801. __x86_pmu_enable_event(&event->hw,
  802. ARCH_PERFMON_EVENTSEL_ENABLE);
  803. }
  804. /*
  805. * Add a single event to the PMU.
  806. *
  807. * The event is added to the group of enabled events
  808. * but only if it can be scheduled with existing events.
  809. */
  810. static int x86_pmu_add(struct perf_event *event, int flags)
  811. {
  812. struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
  813. struct hw_perf_event *hwc;
  814. int assign[X86_PMC_IDX_MAX];
  815. int n, n0, ret;
  816. hwc = &event->hw;
  817. perf_pmu_disable(event->pmu);
  818. n0 = cpuc->n_events;
  819. ret = n = collect_events(cpuc, event, false);
  820. if (ret < 0)
  821. goto out;
  822. hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
  823. if (!(flags & PERF_EF_START))
  824. hwc->state |= PERF_HES_ARCH;
  825. /*
  826. * If group events scheduling transaction was started,
  827. * skip the schedulability test here, it will be performed
  828. * at commit time (->commit_txn) as a whole
  829. */
  830. if (cpuc->group_flag & PERF_EVENT_TXN)
  831. goto done_collect;
  832. ret = x86_pmu.schedule_events(cpuc, n, assign);
  833. if (ret)
  834. goto out;
  835. /*
  836. * copy the new assignment now that we know it is possible;
  837. * it will be used by hw_perf_enable()
  838. */
  839. memcpy(cpuc->assign, assign, n*sizeof(int));
  840. done_collect:
  841. cpuc->n_events = n;
  842. cpuc->n_added += n - n0;
  843. cpuc->n_txn += n - n0;
  844. ret = 0;
  845. out:
  846. perf_pmu_enable(event->pmu);
  847. return ret;
  848. }
  849. static void x86_pmu_start(struct perf_event *event, int flags)
  850. {
  851. struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
  852. int idx = event->hw.idx;
  853. if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
  854. return;
  855. if (WARN_ON_ONCE(idx == -1))
  856. return;
  857. if (flags & PERF_EF_RELOAD) {
  858. WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
  859. x86_perf_event_set_period(event);
  860. }
  861. event->hw.state = 0;
  862. cpuc->events[idx] = event;
  863. __set_bit(idx, cpuc->active_mask);
  864. __set_bit(idx, cpuc->running);
  865. x86_pmu.enable(event);
  866. perf_event_update_userpage(event);
  867. }
  868. void perf_event_print_debug(void)
  869. {
  870. u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
  871. u64 pebs;
  872. struct cpu_hw_events *cpuc;
  873. unsigned long flags;
  874. int cpu, idx;
  875. if (!x86_pmu.num_counters)
  876. return;
  877. local_irq_save(flags);
  878. cpu = smp_processor_id();
  879. cpuc = &per_cpu(cpu_hw_events, cpu);
  880. if (x86_pmu.version >= 2) {
  881. rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
  882. rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
  883. rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
  884. rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
  885. rdmsrl(MSR_IA32_PEBS_ENABLE, pebs);
  886. pr_info("\n");
  887. pr_info("CPU#%d: ctrl: %016llx\n", cpu, ctrl);
  888. pr_info("CPU#%d: status: %016llx\n", cpu, status);
  889. pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow);
  890. pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed);
  891. pr_info("CPU#%d: pebs: %016llx\n", cpu, pebs);
  892. }
  893. pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask);
  894. for (idx = 0; idx < x86_pmu.num_counters; idx++) {
  895. rdmsrl(x86_pmu_config_addr(idx), pmc_ctrl);
  896. rdmsrl(x86_pmu_event_addr(idx), pmc_count);
  897. prev_left = per_cpu(pmc_prev_left[idx], cpu);
  898. pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n",
  899. cpu, idx, pmc_ctrl);
  900. pr_info("CPU#%d: gen-PMC%d count: %016llx\n",
  901. cpu, idx, pmc_count);
  902. pr_info("CPU#%d: gen-PMC%d left: %016llx\n",
  903. cpu, idx, prev_left);
  904. }
  905. for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
  906. rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
  907. pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
  908. cpu, idx, pmc_count);
  909. }
  910. local_irq_restore(flags);
  911. }
  912. void x86_pmu_stop(struct perf_event *event, int flags)
  913. {
  914. struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
  915. struct hw_perf_event *hwc = &event->hw;
  916. if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
  917. x86_pmu.disable(event);
  918. cpuc->events[hwc->idx] = NULL;
  919. WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
  920. hwc->state |= PERF_HES_STOPPED;
  921. }
  922. if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
  923. /*
  924. * Drain the remaining delta count out of an event
  925. * that we are disabling:
  926. */
  927. x86_perf_event_update(event);
  928. hwc->state |= PERF_HES_UPTODATE;
  929. }
  930. }
  931. static void x86_pmu_del(struct perf_event *event, int flags)
  932. {
  933. struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
  934. int i;
  935. /*
  936. * If we're called during a txn, we don't need to do anything.
  937. * The events never got scheduled and ->cancel_txn will truncate
  938. * the event_list.
  939. */
  940. if (cpuc->group_flag & PERF_EVENT_TXN)
  941. return;
  942. x86_pmu_stop(event, PERF_EF_UPDATE);
  943. for (i = 0; i < cpuc->n_events; i++) {
  944. if (event == cpuc->event_list[i]) {
  945. if (x86_pmu.put_event_constraints)
  946. x86_pmu.put_event_constraints(cpuc, event);
  947. while (++i < cpuc->n_events)
  948. cpuc->event_list[i-1] = cpuc->event_list[i];
  949. --cpuc->n_events;
  950. break;
  951. }
  952. }
  953. perf_event_update_userpage(event);
  954. }
  955. int x86_pmu_handle_irq(struct pt_regs *regs)
  956. {
  957. struct perf_sample_data data;
  958. struct cpu_hw_events *cpuc;
  959. struct perf_event *event;
  960. int idx, handled = 0;
  961. u64 val;
  962. perf_sample_data_init(&data, 0);
  963. cpuc = &__get_cpu_var(cpu_hw_events);
  964. /*
  965. * Some chipsets need to unmask the LVTPC in a particular spot
  966. * inside the nmi handler. As a result, the unmasking was pushed
  967. * into all the nmi handlers.
  968. *
  969. * This generic handler doesn't seem to have any issues where the
  970. * unmasking occurs so it was left at the top.
  971. */
  972. apic_write(APIC_LVTPC, APIC_DM_NMI);
  973. for (idx = 0; idx < x86_pmu.num_counters; idx++) {
  974. if (!test_bit(idx, cpuc->active_mask)) {
  975. /*
  976. * Though we deactivated the counter, some CPUs
  977. * might still deliver spurious interrupts that are
  978. * still in flight. Catch them:
  979. */
  980. if (__test_and_clear_bit(idx, cpuc->running))
  981. handled++;
  982. continue;
  983. }
  984. event = cpuc->events[idx];
  985. val = x86_perf_event_update(event);
  986. if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
  987. continue;
  988. /*
  989. * event overflow
  990. */
  991. handled++;
  992. data.period = event->hw.last_period;
  993. if (!x86_perf_event_set_period(event))
  994. continue;
  995. if (perf_event_overflow(event, &data, regs))
  996. x86_pmu_stop(event, 0);
  997. }
  998. if (handled)
  999. inc_irq_stat(apic_perf_irqs);
  1000. return handled;
  1001. }
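/*
 * Note on the overflow test above (48-bit counters assumed for the
 * example): counters are primed with a negative value, so while one is
 * still counting up towards zero bit (cntval_bits - 1) remains set and
 * the event is skipped; once it wraps past zero that bit clears and the
 * overflow is handled:
 *
 *	0xfffffffe7960	-> bit 47 set, not overflowed yet
 *	0x000000000012	-> bit 47 clear, overflow handled
 */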
  1002. void perf_events_lapic_init(void)
  1003. {
  1004. if (!x86_pmu.apic || !x86_pmu_initialized())
  1005. return;
  1006. /*
  1007. * Always use NMI for PMU
  1008. */
  1009. apic_write(APIC_LVTPC, APIC_DM_NMI);
  1010. }
  1011. static int __kprobes
  1012. perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
  1013. {
  1014. if (!atomic_read(&active_events))
  1015. return NMI_DONE;
  1016. return x86_pmu.handle_irq(regs);
  1017. }
  1018. struct event_constraint emptyconstraint;
  1019. struct event_constraint unconstrained;
  1020. static int __cpuinit
  1021. x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
  1022. {
  1023. unsigned int cpu = (long)hcpu;
  1024. struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
  1025. int ret = NOTIFY_OK;
  1026. switch (action & ~CPU_TASKS_FROZEN) {
  1027. case CPU_UP_PREPARE:
  1028. cpuc->kfree_on_online = NULL;
  1029. if (x86_pmu.cpu_prepare)
  1030. ret = x86_pmu.cpu_prepare(cpu);
  1031. break;
  1032. case CPU_STARTING:
  1033. if (x86_pmu.attr_rdpmc)
  1034. set_in_cr4(X86_CR4_PCE);
  1035. if (x86_pmu.cpu_starting)
  1036. x86_pmu.cpu_starting(cpu);
  1037. break;
  1038. case CPU_ONLINE:
  1039. kfree(cpuc->kfree_on_online);
  1040. break;
  1041. case CPU_DYING:
  1042. if (x86_pmu.cpu_dying)
  1043. x86_pmu.cpu_dying(cpu);
  1044. break;
  1045. case CPU_UP_CANCELED:
  1046. case CPU_DEAD:
  1047. if (x86_pmu.cpu_dead)
  1048. x86_pmu.cpu_dead(cpu);
  1049. break;
  1050. default:
  1051. break;
  1052. }
  1053. return ret;
  1054. }
  1055. static void __init pmu_check_apic(void)
  1056. {
  1057. if (cpu_has_apic)
  1058. return;
  1059. x86_pmu.apic = 0;
  1060. pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
  1061. pr_info("no hardware sampling interrupt available.\n");
  1062. }
  1063. static struct attribute_group x86_pmu_format_group = {
  1064. .name = "format",
  1065. .attrs = NULL,
  1066. };
  1067. static int __init init_hw_perf_events(void)
  1068. {
  1069. struct x86_pmu_quirk *quirk;
  1070. struct event_constraint *c;
  1071. int err;
  1072. pr_info("Performance Events: ");
  1073. switch (boot_cpu_data.x86_vendor) {
  1074. case X86_VENDOR_INTEL:
  1075. err = intel_pmu_init();
  1076. break;
  1077. case X86_VENDOR_AMD:
  1078. err = amd_pmu_init();
  1079. break;
  1080. default:
  1081. return 0;
  1082. }
  1083. if (err != 0) {
  1084. pr_cont("no PMU driver, software events only.\n");
  1085. return 0;
  1086. }
  1087. pmu_check_apic();
  1088. /* sanity check that the hardware exists or is emulated */
  1089. if (!check_hw_exists())
  1090. return 0;
  1091. pr_cont("%s PMU driver.\n", x86_pmu.name);
  1092. for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next)
  1093. quirk->func();
  1094. if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
  1095. WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
  1096. x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
  1097. x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
  1098. }
  1099. x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
  1100. if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
  1101. WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
  1102. x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
  1103. x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
  1104. }
  1105. x86_pmu.intel_ctrl |=
  1106. ((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
  1107. perf_events_lapic_init();
  1108. register_nmi_handler(NMI_LOCAL, perf_event_nmi_handler, 0, "PMI");
  1109. unconstrained = (struct event_constraint)
  1110. __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
  1111. 0, x86_pmu.num_counters, 0);
  1112. if (x86_pmu.event_constraints) {
  1113. /*
  1114. * event on fixed counter2 (REF_CYCLES) only works on this
  1115. * counter, so do not extend mask to generic counters
  1116. */
  1117. for_each_event_constraint(c, x86_pmu.event_constraints) {
  1118. if (c->cmask != X86_RAW_EVENT_MASK
  1119. || c->idxmsk64 == X86_PMC_MSK_FIXED_REF_CYCLES) {
  1120. continue;
  1121. }
  1122. c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
  1123. c->weight += x86_pmu.num_counters;
  1124. }
  1125. }
  1126. x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */
  1127. x86_pmu_format_group.attrs = x86_pmu.format_attrs;
  1128. pr_info("... version: %d\n", x86_pmu.version);
  1129. pr_info("... bit width: %d\n", x86_pmu.cntval_bits);
  1130. pr_info("... generic registers: %d\n", x86_pmu.num_counters);
  1131. pr_info("... value mask: %016Lx\n", x86_pmu.cntval_mask);
  1132. pr_info("... max period: %016Lx\n", x86_pmu.max_period);
  1133. pr_info("... fixed-purpose events: %d\n", x86_pmu.num_counters_fixed);
  1134. pr_info("... event mask: %016Lx\n", x86_pmu.intel_ctrl);
  1135. perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
  1136. perf_cpu_notifier(x86_pmu_notifier);
  1137. return 0;
  1138. }
  1139. early_initcall(init_hw_perf_events);
  1140. static inline void x86_pmu_read(struct perf_event *event)
  1141. {
  1142. x86_perf_event_update(event);
  1143. }
  1144. /*
  1145. * Start group events scheduling transaction
  1146. * Set the flag to make pmu::enable() not perform the
  1147. * schedulability test, it will be performed at commit time
  1148. */
  1149. static void x86_pmu_start_txn(struct pmu *pmu)
  1150. {
  1151. perf_pmu_disable(pmu);
  1152. __this_cpu_or(cpu_hw_events.group_flag, PERF_EVENT_TXN);
  1153. __this_cpu_write(cpu_hw_events.n_txn, 0);
  1154. }
  1155. /*
  1156. * Stop group events scheduling transaction
  1157. * Clear the flag and pmu::enable() will perform the
  1158. * schedulability test.
  1159. */
  1160. static void x86_pmu_cancel_txn(struct pmu *pmu)
  1161. {
  1162. __this_cpu_and(cpu_hw_events.group_flag, ~PERF_EVENT_TXN);
  1163. /*
  1164. * Truncate the collected events.
  1165. */
  1166. __this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn));
  1167. __this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn));
  1168. perf_pmu_enable(pmu);
  1169. }
  1170. /*
  1171. * Commit group events scheduling transaction
  1172. * Perform the group schedulability test as a whole
  1173. * Return 0 if success
  1174. */
  1175. static int x86_pmu_commit_txn(struct pmu *pmu)
  1176. {
  1177. struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
  1178. int assign[X86_PMC_IDX_MAX];
  1179. int n, ret;
  1180. n = cpuc->n_events;
  1181. if (!x86_pmu_initialized())
  1182. return -EAGAIN;
  1183. ret = x86_pmu.schedule_events(cpuc, n, assign);
  1184. if (ret)
  1185. return ret;
  1186. /*
  1187. * copy the new assignment now that we know it is possible;
  1188. * it will be used by hw_perf_enable()
  1189. */
  1190. memcpy(cpuc->assign, assign, n*sizeof(int));
  1191. cpuc->group_flag &= ~PERF_EVENT_TXN;
  1192. perf_pmu_enable(pmu);
  1193. return 0;
  1194. }
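/*
 * Sketch of how the core is expected to drive the three transaction hooks
 * above when scheduling a group (a hedged reconstruction of the generic
 * group-scheduling path, not code from this file):
 */
#if 0
	pmu->start_txn(pmu);
	pmu->add(leader, PERF_EF_START);	/* no per-event test */
	pmu->add(sibling, PERF_EF_START);
	if (pmu->commit_txn(pmu))		/* test the whole group */
		pmu->cancel_txn(pmu);
#endif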
  1195. /*
  1196. * a fake_cpuc is used to validate event groups. Due to
  1197. * the extra reg logic, we need to also allocate a fake
  1198. * per_core and per_cpu structure. Otherwise, group events
  1199. * using extra reg may conflict without the kernel being
  1200. * able to catch this when the last event gets added to
  1201. * the group.
  1202. */
  1203. static void free_fake_cpuc(struct cpu_hw_events *cpuc)
  1204. {
  1205. kfree(cpuc->shared_regs);
  1206. kfree(cpuc);
  1207. }
  1208. static struct cpu_hw_events *allocate_fake_cpuc(void)
  1209. {
  1210. struct cpu_hw_events *cpuc;
  1211. int cpu = raw_smp_processor_id();
  1212. cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL);
  1213. if (!cpuc)
  1214. return ERR_PTR(-ENOMEM);
  1215. /* only needed if we have extra_regs */
  1216. if (x86_pmu.extra_regs) {
  1217. cpuc->shared_regs = allocate_shared_regs(cpu);
  1218. if (!cpuc->shared_regs)
  1219. goto error;
  1220. }
  1221. return cpuc;
  1222. error:
  1223. free_fake_cpuc(cpuc);
  1224. return ERR_PTR(-ENOMEM);
  1225. }
  1226. /*
  1227. * validate that we can schedule this event
  1228. */
  1229. static int validate_event(struct perf_event *event)
  1230. {
  1231. struct cpu_hw_events *fake_cpuc;
  1232. struct event_constraint *c;
  1233. int ret = 0;
  1234. fake_cpuc = allocate_fake_cpuc();
  1235. if (IS_ERR(fake_cpuc))
  1236. return PTR_ERR(fake_cpuc);
  1237. c = x86_pmu.get_event_constraints(fake_cpuc, event);
  1238. if (!c || !c->weight)
  1239. ret = -EINVAL;
  1240. if (x86_pmu.put_event_constraints)
  1241. x86_pmu.put_event_constraints(fake_cpuc, event);
  1242. free_fake_cpuc(fake_cpuc);
  1243. return ret;
  1244. }
  1245. /*
  1246. * validate a single event group
  1247. *
  1248. * validation includes:
  1249. * - check that events are compatible with each other
  1250. * - events do not compete for the same counter
  1251. * - number of events <= number of counters
  1252. *
  1253. * validation ensures the group can be loaded onto the
  1254. * PMU if it was the only group available.
  1255. */
  1256. static int validate_group(struct perf_event *event)
  1257. {
  1258. struct perf_event *leader = event->group_leader;
  1259. struct cpu_hw_events *fake_cpuc;
  1260. int ret = -EINVAL, n;
  1261. fake_cpuc = allocate_fake_cpuc();
  1262. if (IS_ERR(fake_cpuc))
  1263. return PTR_ERR(fake_cpuc);
  1264. /*
  1265. * the event is not yet connected with its
  1266. * siblings therefore we must first collect
  1267. * existing siblings, then add the new event
  1268. * before we can simulate the scheduling
  1269. */
  1270. n = collect_events(fake_cpuc, leader, true);
  1271. if (n < 0)
  1272. goto out;
  1273. fake_cpuc->n_events = n;
  1274. n = collect_events(fake_cpuc, event, false);
  1275. if (n < 0)
  1276. goto out;
  1277. fake_cpuc->n_events = n;
  1278. ret = x86_pmu.schedule_events(fake_cpuc, n, NULL);
  1279. out:
  1280. free_fake_cpuc(fake_cpuc);
  1281. return ret;
  1282. }
  1283. static int x86_pmu_event_init(struct perf_event *event)
  1284. {
  1285. struct pmu *tmp;
  1286. int err;
  1287. switch (event->attr.type) {
  1288. case PERF_TYPE_RAW:
  1289. case PERF_TYPE_HARDWARE:
  1290. case PERF_TYPE_HW_CACHE:
  1291. break;
  1292. default:
  1293. return -ENOENT;
  1294. }
  1295. err = __x86_pmu_event_init(event);
  1296. if (!err) {
  1297. /*
  1298. * we temporarily connect event to its pmu
  1299. * such that validate_group() can classify
  1300. * it as an x86 event using is_x86_event()
  1301. */
  1302. tmp = event->pmu;
  1303. event->pmu = &pmu;
  1304. if (event->group_leader != event)
  1305. err = validate_group(event);
  1306. else
  1307. err = validate_event(event);
  1308. event->pmu = tmp;
  1309. }
  1310. if (err) {
  1311. if (event->destroy)
  1312. event->destroy(event);
  1313. }
  1314. return err;
  1315. }
  1316. static int x86_pmu_event_idx(struct perf_event *event)
  1317. {
  1318. int idx = event->hw.idx;
  1319. if (!x86_pmu.attr_rdpmc)
  1320. return 0;
  1321. if (x86_pmu.num_counters_fixed && idx >= X86_PMC_IDX_FIXED) {
  1322. idx -= X86_PMC_IDX_FIXED;
  1323. idx |= 1 << 30;
  1324. }
  1325. return idx + 1;
  1326. }
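/*
 * Sketch of the user-space side (illustrative): the value computed above
 * is published as perf_event_mmap_page::index, and user space executes
 * rdpmc on (index - 1); bit 30 in that operand selects the fixed-counter
 * range:
 *
 *	rdpmc(index - 1)		generic counter idx
 *	rdpmc(0x40000000 | n)		fixed counter n
 */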
  1327. static ssize_t get_attr_rdpmc(struct device *cdev,
  1328. struct device_attribute *attr,
  1329. char *buf)
  1330. {
  1331. return snprintf(buf, 40, "%d\n", x86_pmu.attr_rdpmc);
  1332. }
  1333. static void change_rdpmc(void *info)
  1334. {
  1335. bool enable = !!(unsigned long)info;
  1336. if (enable)
  1337. set_in_cr4(X86_CR4_PCE);
  1338. else
  1339. clear_in_cr4(X86_CR4_PCE);
  1340. }
  1341. static ssize_t set_attr_rdpmc(struct device *cdev,
  1342. struct device_attribute *attr,
  1343. const char *buf, size_t count)
  1344. {
  1345. unsigned long val = simple_strtoul(buf, NULL, 0);
  1346. if (!!val != !!x86_pmu.attr_rdpmc) {
  1347. x86_pmu.attr_rdpmc = !!val;
  1348. smp_call_function(change_rdpmc, (void *)val, 1);
  1349. }
  1350. return count;
  1351. }
  1352. static DEVICE_ATTR(rdpmc, S_IRUSR | S_IWUSR, get_attr_rdpmc, set_attr_rdpmc);
  1353. static struct attribute *x86_pmu_attrs[] = {
  1354. &dev_attr_rdpmc.attr,
  1355. NULL,
  1356. };
  1357. static struct attribute_group x86_pmu_attr_group = {
  1358. .attrs = x86_pmu_attrs,
  1359. };
  1360. static const struct attribute_group *x86_pmu_attr_groups[] = {
  1361. &x86_pmu_attr_group,
  1362. &x86_pmu_format_group,
  1363. NULL,
  1364. };
  1365. static void x86_pmu_flush_branch_stack(void)
  1366. {
  1367. if (x86_pmu.flush_branch_stack)
  1368. x86_pmu.flush_branch_stack();
  1369. }
  1370. static struct pmu pmu = {
  1371. .pmu_enable = x86_pmu_enable,
  1372. .pmu_disable = x86_pmu_disable,
  1373. .attr_groups = x86_pmu_attr_groups,
  1374. .event_init = x86_pmu_event_init,
  1375. .add = x86_pmu_add,
  1376. .del = x86_pmu_del,
  1377. .start = x86_pmu_start,
  1378. .stop = x86_pmu_stop,
  1379. .read = x86_pmu_read,
  1380. .start_txn = x86_pmu_start_txn,
  1381. .cancel_txn = x86_pmu_cancel_txn,
  1382. .commit_txn = x86_pmu_commit_txn,
  1383. .event_idx = x86_pmu_event_idx,
  1384. .flush_branch_stack = x86_pmu_flush_branch_stack,
  1385. };
  1386. void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
  1387. {
  1388. userpg->cap_usr_time = 0;
  1389. userpg->cap_usr_rdpmc = x86_pmu.attr_rdpmc;
  1390. userpg->pmc_width = x86_pmu.cntval_bits;
  1391. if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
  1392. return;
  1393. if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
  1394. return;
  1395. userpg->cap_usr_time = 1;
  1396. userpg->time_mult = this_cpu_read(cyc2ns);
  1397. userpg->time_shift = CYC2NS_SCALE_FACTOR;
  1398. userpg->time_offset = this_cpu_read(cyc2ns_offset) - now;
  1399. }
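/*
 * User-space side of the time fields filled in above (a sketch of the
 * documented TSC-to-ns conversion; the variable names are the editor's):
 */
#if 0
	u64 quot  = cyc >> userpg->time_shift;
	u64 rem   = cyc & ((1ULL << userpg->time_shift) - 1);
	u64 delta = userpg->time_offset + quot * userpg->time_mult +
		    ((rem * userpg->time_mult) >> userpg->time_shift);
#endif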
  1400. /*
  1401. * callchain support
  1402. */
  1403. static int backtrace_stack(void *data, char *name)
  1404. {
  1405. return 0;
  1406. }
  1407. static void backtrace_address(void *data, unsigned long addr, int reliable)
  1408. {
  1409. struct perf_callchain_entry *entry = data;
  1410. perf_callchain_store(entry, addr);
  1411. }
  1412. static const struct stacktrace_ops backtrace_ops = {
  1413. .stack = backtrace_stack,
  1414. .address = backtrace_address,
  1415. .walk_stack = print_context_stack_bp,
  1416. };
  1417. void
  1418. perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
  1419. {
  1420. if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
  1421. /* TODO: We don't support guest os callchain now */
  1422. return;
  1423. }
  1424. perf_callchain_store(entry, regs->ip);
  1425. dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry);
  1426. }
  1427. #ifdef CONFIG_COMPAT
  1428. #include <asm/compat.h>
  1429. static inline int
  1430. perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
  1431. {
  1432. /* 32-bit process in 64-bit kernel. */
  1433. struct stack_frame_ia32 frame;
  1434. const void __user *fp;
  1435. if (!test_thread_flag(TIF_IA32))
  1436. return 0;
  1437. fp = compat_ptr(regs->bp);
  1438. while (entry->nr < PERF_MAX_STACK_DEPTH) {
  1439. unsigned long bytes;
  1440. frame.next_frame = 0;
  1441. frame.return_address = 0;
  1442. bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
  1443. if (bytes != sizeof(frame))
  1444. break;
  1445. if (fp < compat_ptr(regs->sp))
  1446. break;
  1447. perf_callchain_store(entry, frame.return_address);
  1448. fp = compat_ptr(frame.next_frame);
  1449. }
  1450. return 1;
  1451. }
  1452. #else
  1453. static inline int
  1454. perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
  1455. {
  1456. return 0;
  1457. }
  1458. #endif
  1459. void
  1460. perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
  1461. {
  1462. struct stack_frame frame;
  1463. const void __user *fp;
  1464. if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
  1465. /* TODO: We don't support guest os callchain now */
  1466. return;
  1467. }
  1468. fp = (void __user *)regs->bp;
  1469. perf_callchain_store(entry, regs->ip);
  1470. if (!current->mm)
  1471. return;
  1472. if (perf_callchain_user32(regs, entry))
  1473. return;
  1474. while (entry->nr < PERF_MAX_STACK_DEPTH) {
  1475. unsigned long bytes;
  1476. frame.next_frame = NULL;
  1477. frame.return_address = 0;
  1478. bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
  1479. if (bytes != sizeof(frame))
  1480. break;
  1481. if ((unsigned long)fp < regs->sp)
  1482. break;
  1483. perf_callchain_store(entry, frame.return_address);
  1484. fp = frame.next_frame;
  1485. }
  1486. }
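/*
 * The loop above assumes the classic frame-pointer chain; for reference,
 * the layout it copies from user space is (sketch, mirroring the
 * definition in asm/stacktrace.h):
 */
#if 0
	struct stack_frame {
		struct stack_frame	*next_frame;	/* saved frame pointer */
		unsigned long		return_address;
	};
#endif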
  1487. unsigned long perf_instruction_pointer(struct pt_regs *regs)
  1488. {
  1489. unsigned long ip;
  1490. if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
  1491. ip = perf_guest_cbs->get_guest_ip();
  1492. else
  1493. ip = instruction_pointer(regs);
  1494. return ip;
  1495. }
  1496. unsigned long perf_misc_flags(struct pt_regs *regs)
  1497. {
  1498. int misc = 0;
  1499. if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
  1500. if (perf_guest_cbs->is_user_mode())
  1501. misc |= PERF_RECORD_MISC_GUEST_USER;
  1502. else
  1503. misc |= PERF_RECORD_MISC_GUEST_KERNEL;
  1504. } else {
  1505. if (user_mode(regs))
  1506. misc |= PERF_RECORD_MISC_USER;
  1507. else
  1508. misc |= PERF_RECORD_MISC_KERNEL;
  1509. }
  1510. if (regs->flags & PERF_EFLAGS_EXACT)
  1511. misc |= PERF_RECORD_MISC_EXACT_IP;
  1512. return misc;
  1513. }
  1514. void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
  1515. {
  1516. cap->version = x86_pmu.version;
  1517. cap->num_counters_gp = x86_pmu.num_counters;
  1518. cap->num_counters_fixed = x86_pmu.num_counters_fixed;
  1519. cap->bit_width_gp = x86_pmu.cntval_bits;
  1520. cap->bit_width_fixed = x86_pmu.cntval_bits;
  1521. cap->events_mask = (unsigned int)x86_pmu.events_maskl;
  1522. cap->events_mask_len = x86_pmu.events_mask_len;
  1523. }
  1524. EXPORT_SYMBOL_GPL(perf_get_x86_pmu_capability);