perf_event.c

/*
 * Performance events x86 architecture code
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 *  Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 *  For licencing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/cpu.h>
#include <linux/bitops.h>

#include <asm/apic.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>
#include <asm/compat.h>

#if 0
#undef wrmsrl
#define wrmsrl(msr, val) \
do { \
	trace_printk("wrmsrl(%lx, %lx)\n", (unsigned long)(msr),\
		     (unsigned long)(val)); \
	native_write_msr((msr), (u32)((u64)(val)), \
			 (u32)((u64)(val) >> 32)); \
} while (0)
#endif

/*
 * best effort, GUP based copy_from_user() that assumes IRQ or NMI context
 */
static unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
{
	unsigned long offset, addr = (unsigned long)from;
	unsigned long size, len = 0;
	struct page *page;
	void *map;
	int ret;

	do {
		ret = __get_user_pages_fast(addr, 1, 0, &page);
		if (!ret)
			break;

		offset = addr & (PAGE_SIZE - 1);
		size = min(PAGE_SIZE - offset, n - len);

		map = kmap_atomic(page);
		memcpy(to, map+offset, size);
		kunmap_atomic(map);
		put_page(page);

		len  += size;
		to   += size;
		addr += size;

	} while (len < n);

	return len;
}
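
/*
 * Illustrative note (not part of the original file): copy_from_user_nmi()
 * returns the number of bytes actually copied and stops at the first page
 * that __get_user_pages_fast() cannot pin, so callers such as
 * perf_callchain_user() below treat anything shorter than the requested
 * size as a failed read:
 *
 *	if (copy_from_user_nmi(&frame, fp, sizeof(frame)) != sizeof(frame))
 *		break;	/* unreadable or only partially readable frame */
 */
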
struct event_constraint {
	union {
		unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
		u64 idxmsk64;
	};
	u64 code;
	u64 cmask;
	int weight;
};

struct amd_nb {
	int nb_id;  /* NorthBridge id */
	int refcnt; /* reference count */
	struct perf_event *owners[X86_PMC_IDX_MAX];
	struct event_constraint event_constraints[X86_PMC_IDX_MAX];
};

#define MAX_LBR_ENTRIES		16

struct cpu_hw_events {
	/*
	 * Generic x86 PMC bits
	 */
	struct perf_event *events[X86_PMC_IDX_MAX]; /* in counter order */
	unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	int enabled;

	int n_events;
	int n_added;
	int n_txn;
	int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
	u64 tags[X86_PMC_IDX_MAX];
	struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */

	unsigned int group_flag;

	/*
	 * Intel DebugStore bits
	 */
	struct debug_store *ds;
	u64 pebs_enabled;

	/*
	 * Intel LBR bits
	 */
	int lbr_users;
	void *lbr_context;
	struct perf_branch_stack lbr_stack;
	struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES];

	/*
	 * AMD specific bits
	 */
	struct amd_nb *amd_nb;
};

#define __EVENT_CONSTRAINT(c, n, m, w) {\
	{ .idxmsk64 = (n) },		\
	.code = (c),			\
	.cmask = (m),			\
	.weight = (w),			\
}

#define EVENT_CONSTRAINT(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n))

/*
 * Constraint on the Event code.
 */
#define INTEL_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)

/*
 * Constraint on the Event code + UMask + fixed-mask
 *
 * filter mask to validate fixed counter events.
 * the following filters disqualify for fixed counters:
 *  - inv
 *  - edge
 *  - cnt-mask
 * The other filters are supported by fixed counters.
 * The any-thread option is supported starting with v3.
 */
#define FIXED_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, (1ULL << (32+n)), X86_RAW_EVENT_MASK)

/*
 * Constraint on the Event code + UMask
 */
#define PEBS_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)

#define EVENT_CONSTRAINT_END		\
	EVENT_CONSTRAINT(0, 0, 0)

#define for_each_event_constraint(e, c)	\
	for ((e) = (c); (e)->weight; (e)++)
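
/*
 * Worked example (illustrative values, not taken from this file): on a PMU
 * with four generic counters,
 *
 *	INTEL_EVENT_CONSTRAINT(0xc0, 0x3)
 *
 * expands to { .idxmsk64 = 0x3, .code = 0xc0,
 * .cmask = ARCH_PERFMON_EVENTSEL_EVENT, .weight = 2 }: any event whose
 * event-select field is 0xc0 may only be scheduled on counters 0 and 1.
 * FIXED_EVENT_CONSTRAINT(c, n) sets bit 32+n instead, i.e. it pins the
 * event to fixed counter n (the fixed counters start at index
 * X86_PMC_IDX_FIXED == 32).
 */
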
union perf_capabilities {
	struct {
		u64 lbr_format    : 6;
		u64 pebs_trap     : 1;
		u64 pebs_arch_reg : 1;
		u64 pebs_format   : 4;
		u64 smm_freeze    : 1;
	};
	u64 capabilities;
};

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
	/*
	 * Generic x86 PMC bits
	 */
	const char *name;
	int version;
	int (*handle_irq)(struct pt_regs *);
	void (*disable_all)(void);
	void (*enable_all)(int added);
	void (*enable)(struct perf_event *);
	void (*disable)(struct perf_event *);
	int (*hw_config)(struct perf_event *event);
	int (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
	unsigned eventsel;
	unsigned perfctr;
	u64 (*event_map)(int);
	int max_events;
	int num_counters;
	int num_counters_fixed;
	int cntval_bits;
	u64 cntval_mask;
	int apic;
	u64 max_period;
	struct event_constraint *
		(*get_event_constraints)(struct cpu_hw_events *cpuc,
					 struct perf_event *event);

	void (*put_event_constraints)(struct cpu_hw_events *cpuc,
				      struct perf_event *event);
	struct event_constraint *event_constraints;
	void (*quirks)(void);
	int perfctr_second_write;

	int (*cpu_prepare)(int cpu);
	void (*cpu_starting)(int cpu);
	void (*cpu_dying)(int cpu);
	void (*cpu_dead)(int cpu);

	/*
	 * Intel Arch Perfmon v2+
	 */
	u64 intel_ctrl;
	union perf_capabilities intel_cap;

	/*
	 * Intel DebugStore bits
	 */
	int bts, pebs;
	int bts_active, pebs_active;
	int pebs_record_size;
	void (*drain_pebs)(struct pt_regs *regs);
	struct event_constraint *pebs_constraints;

	/*
	 * Intel LBR
	 */
	unsigned long lbr_tos, lbr_from, lbr_to; /* MSR base regs */
	int lbr_nr;				 /* hardware stack size */
};

static struct x86_pmu x86_pmu __read_mostly;

static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
	.enabled = 1,
};

static int x86_perf_event_set_period(struct perf_event *event);

/*
 * Generalized hw caching related hw_event table, filled
 * in on a per model basis. A value of 0 means
 * 'not supported', -1 means 'hw_event makes no sense on
 * this CPU', any other value means the raw hw_event
 * ID.
 */

#define C(x) PERF_COUNT_HW_CACHE_##x

static u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

/*
 * Propagate event elapsed time into the generic event.
 * Can only be executed on the CPU where the event is active.
 * Returns the delta events processed.
 */
static u64
x86_perf_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int shift = 64 - x86_pmu.cntval_bits;
	u64 prev_raw_count, new_raw_count;
	int idx = hwc->idx;
	s64 delta;

	if (idx == X86_PMC_IDX_FIXED_BTS)
		return 0;

	/*
	 * Careful: an NMI might modify the previous event value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic event atomically:
	 */
again:
	prev_raw_count = local64_read(&hwc->prev_count);
	rdmsrl(hwc->event_base + idx, new_raw_count);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (event-)time and add that to the generic event.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}
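
/*
 * Worked example (illustrative numbers, assuming cntval_bits == 48):
 * shift = 64 - 48 = 16.  If prev_raw_count = 0xfffffffffff0 and the
 * counter has since wrapped to new_raw_count = 0x000000000010, then
 *
 *	delta = ((0x10 << 16) - (0xfffffffffff0 << 16)) >> 16 = 0x20
 *
 * because the shift pair sign-extends both values from bit 47, so the
 * wrap past 2^48 is handled correctly and 32 events are added to
 * event->count.
 */
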
static atomic_t active_events;
static DEFINE_MUTEX(pmc_reserve_mutex);

#ifdef CONFIG_X86_LOCAL_APIC

static bool reserve_pmc_hardware(void)
{
	int i;

	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
			goto perfctr_fail;
	}

	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
			goto eventsel_fail;
	}

	return true;

eventsel_fail:
	for (i--; i >= 0; i--)
		release_evntsel_nmi(x86_pmu.eventsel + i);

	i = x86_pmu.num_counters;

perfctr_fail:
	for (i--; i >= 0; i--)
		release_perfctr_nmi(x86_pmu.perfctr + i);

	return false;
}

static void release_pmc_hardware(void)
{
	int i;

	for (i = 0; i < x86_pmu.num_counters; i++) {
		release_perfctr_nmi(x86_pmu.perfctr + i);
		release_evntsel_nmi(x86_pmu.eventsel + i);
	}
}

#else

static bool reserve_pmc_hardware(void) { return true; }
static void release_pmc_hardware(void) {}

#endif

static bool check_hw_exists(void)
{
	u64 val, val_new = 0;
	int ret = 0;

	val = 0xabcdUL;
	ret |= checking_wrmsrl(x86_pmu.perfctr, val);
	ret |= rdmsrl_safe(x86_pmu.perfctr, &val_new);
	if (ret || val != val_new)
		return false;

	return true;
}

static void reserve_ds_buffers(void);
static void release_ds_buffers(void);

static void hw_perf_event_destroy(struct perf_event *event)
{
	if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
		release_pmc_hardware();
		release_ds_buffers();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

static inline int x86_pmu_initialized(void)
{
	return x86_pmu.handle_irq != NULL;
}

static inline int
set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
{
	unsigned int cache_type, cache_op, cache_result;
	u64 config, val;

	config = attr->config;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	val = hw_cache_event_ids[cache_type][cache_op][cache_result];

	if (val == 0)
		return -ENOENT;

	if (val == -1)
		return -EINVAL;

	hwc->config |= val;

	return 0;
}
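
/*
 * Illustrative example (standard perf ABI encoding, matching the shifts
 * above): a generalized cache event packs type, op and result into
 * attr->config as
 *
 *	config = (type) | (op << 8) | (result << 16)
 *
 * so "L1D read misses" would be requested as
 *
 *	PERF_COUNT_HW_CACHE_L1D |
 *	(PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *	(PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
 *
 * and resolved through hw_cache_event_ids[][][] into a model-specific raw
 * event, or rejected with -ENOENT/-EINVAL.
 */
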
static int x86_setup_perfctr(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	u64 config;

	if (!hwc->sample_period) {
		hwc->sample_period = x86_pmu.max_period;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	} else {
		/*
		 * If we have a PMU initialized but no APIC
		 * interrupts, we cannot sample hardware
		 * events (user-space has to fall back and
		 * sample via a hrtimer based software event):
		 */
		if (!x86_pmu.apic)
			return -EOPNOTSUPP;
	}

	if (attr->type == PERF_TYPE_RAW)
		return 0;

	if (attr->type == PERF_TYPE_HW_CACHE)
		return set_ext_hw_attr(hwc, attr);

	if (attr->config >= x86_pmu.max_events)
		return -EINVAL;

	/*
	 * The generic map:
	 */
	config = x86_pmu.event_map(attr->config);

	if (config == 0)
		return -ENOENT;

	if (config == -1LL)
		return -EINVAL;

	/*
	 * Branch tracing:
	 */
	if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
	    (hwc->sample_period == 1)) {
		/* BTS is not supported by this architecture. */
		if (!x86_pmu.bts_active)
			return -EOPNOTSUPP;

		/* BTS is currently only allowed for user-mode. */
		if (!attr->exclude_kernel)
			return -EOPNOTSUPP;
	}

	hwc->config |= config;

	return 0;
}

static int x86_pmu_hw_config(struct perf_event *event)
{
	if (event->attr.precise_ip) {
		int precise = 0;

		/* Support for constant skid */
		if (x86_pmu.pebs_active) {
			precise++;

			/* Support for IP fixup */
			if (x86_pmu.lbr_nr)
				precise++;
		}

		if (event->attr.precise_ip > precise)
			return -EOPNOTSUPP;
	}

	/*
	 * Generate PMC IRQs:
	 * (keep 'enabled' bit clear for now)
	 */
	event->hw.config = ARCH_PERFMON_EVENTSEL_INT;

	/*
	 * Count user and OS events unless requested not to
	 */
	if (!event->attr.exclude_user)
		event->hw.config |= ARCH_PERFMON_EVENTSEL_USR;
	if (!event->attr.exclude_kernel)
		event->hw.config |= ARCH_PERFMON_EVENTSEL_OS;

	if (event->attr.type == PERF_TYPE_RAW)
		event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;

	return x86_setup_perfctr(event);
}

/*
 * Setup the hardware configuration for a given attr_type
 */
static int __x86_pmu_event_init(struct perf_event *event)
{
	int err;

	if (!x86_pmu_initialized())
		return -ENODEV;

	err = 0;
	if (!atomic_inc_not_zero(&active_events)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&active_events) == 0) {
			if (!reserve_pmc_hardware())
				err = -EBUSY;
			else
				reserve_ds_buffers();
		}
		if (!err)
			atomic_inc(&active_events);
		mutex_unlock(&pmc_reserve_mutex);
	}
	if (err)
		return err;

	event->destroy = hw_perf_event_destroy;

	event->hw.idx = -1;
	event->hw.last_cpu = -1;
	event->hw.last_tag = ~0ULL;

	return x86_pmu.hw_config(event);
}

static void x86_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;
		rdmsrl(x86_pmu.eventsel + idx, val);
		if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
			continue;
		val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
		wrmsrl(x86_pmu.eventsel + idx, val);
	}
}

static void x86_pmu_disable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!x86_pmu_initialized())
		return;

	if (!cpuc->enabled)
		return;

	cpuc->n_added = 0;
	cpuc->enabled = 0;
	barrier();

	x86_pmu.disable_all();
}

static void x86_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		struct perf_event *event = cpuc->events[idx];
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		val = event->hw.config;
		val |= ARCH_PERFMON_EVENTSEL_ENABLE;
		wrmsrl(x86_pmu.eventsel + idx, val);
	}
}

static struct pmu pmu;

static inline int is_x86_event(struct perf_event *event)
{
	return event->pmu == &pmu;
}

static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
{
	struct event_constraint *c, *constraints[X86_PMC_IDX_MAX];
	unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	int i, j, w, wmax, num = 0;
	struct hw_perf_event *hwc;

	bitmap_zero(used_mask, X86_PMC_IDX_MAX);

	for (i = 0; i < n; i++) {
		c = x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
		constraints[i] = c;
	}

	/*
	 * fastpath, try to reuse previous register
	 */
	for (i = 0; i < n; i++) {
		hwc = &cpuc->event_list[i]->hw;
		c = constraints[i];

		/* never assigned */
		if (hwc->idx == -1)
			break;

		/* constraint still honored */
		if (!test_bit(hwc->idx, c->idxmsk))
			break;

		/* not already used */
		if (test_bit(hwc->idx, used_mask))
			break;

		__set_bit(hwc->idx, used_mask);
		if (assign)
			assign[i] = hwc->idx;
	}
	if (i == n)
		goto done;

	/*
	 * begin slow path
	 */
	bitmap_zero(used_mask, X86_PMC_IDX_MAX);

	/*
	 * weight = number of possible counters
	 *
	 * 1    = most constrained, only works on one counter
	 * wmax = least constrained, works on any counter
	 *
	 * assign events to counters starting with most
	 * constrained events.
	 */
	wmax = x86_pmu.num_counters;

	/*
	 * when fixed event counters are present,
	 * wmax is incremented by 1 to account
	 * for one more choice
	 */
	if (x86_pmu.num_counters_fixed)
		wmax++;

	for (w = 1, num = n; num && w <= wmax; w++) {
		/* for each event */
		for (i = 0; num && i < n; i++) {
			c = constraints[i];
			hwc = &cpuc->event_list[i]->hw;

			if (c->weight != w)
				continue;

			for_each_set_bit(j, c->idxmsk, X86_PMC_IDX_MAX) {
				if (!test_bit(j, used_mask))
					break;
			}

			if (j == X86_PMC_IDX_MAX)
				break;

			__set_bit(j, used_mask);

			if (assign)
				assign[i] = j;
			num--;
		}
	}
done:
	/*
	 * scheduling failed or is just a simulation,
	 * free resources if necessary
	 */
	if (!assign || num) {
		for (i = 0; i < n; i++) {
			if (x86_pmu.put_event_constraints)
				x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
		}
	}
	return num ? -ENOSPC : 0;
}
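
/*
 * Illustrative walk-through (hypothetical constraints): with 4 generic
 * counters and three collected events
 *
 *	A: idxmsk = 0x1  (weight 1, counter 0 only)
 *	B: idxmsk = 0xf  (weight 4, any counter)
 *	C: idxmsk = 0xf  (weight 4, any counter)
 *
 * the slow path places A on counter 0 during the w == 1 pass and only
 * then, at w == 4, drops B and C onto the remaining free counters.
 * Scheduling the most constrained events first is what keeps B or C from
 * stealing counter 0 and making A unschedulable.
 */
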
/*
 * dogrp: true if must collect siblings events (group)
 * returns total number of events and error code
 */
static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
{
	struct perf_event *event;
	int n, max_count;

	max_count = x86_pmu.num_counters + x86_pmu.num_counters_fixed;

	/* current number of events already accepted */
	n = cpuc->n_events;

	if (is_x86_event(leader)) {
		if (n >= max_count)
			return -ENOSPC;
		cpuc->event_list[n] = leader;
		n++;
	}
	if (!dogrp)
		return n;

	list_for_each_entry(event, &leader->sibling_list, group_entry) {
		if (!is_x86_event(event) ||
		    event->state <= PERF_EVENT_STATE_OFF)
			continue;

		if (n >= max_count)
			return -ENOSPC;

		cpuc->event_list[n] = event;
		n++;
	}
	return n;
}

static inline void x86_assign_hw_event(struct perf_event *event,
				       struct cpu_hw_events *cpuc, int i)
{
	struct hw_perf_event *hwc = &event->hw;

	hwc->idx = cpuc->assign[i];
	hwc->last_cpu = smp_processor_id();
	hwc->last_tag = ++cpuc->tags[i];

	if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
		hwc->config_base = 0;
		hwc->event_base = 0;
	} else if (hwc->idx >= X86_PMC_IDX_FIXED) {
		hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
		/*
		 * We set it so that event_base + idx in wrmsr/rdmsr maps to
		 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
		 */
		hwc->event_base =
			MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
	} else {
		hwc->config_base = x86_pmu.eventsel;
		hwc->event_base = x86_pmu.perfctr;
	}
}

static inline int match_prev_assignment(struct hw_perf_event *hwc,
					struct cpu_hw_events *cpuc,
					int i)
{
	return hwc->idx == cpuc->assign[i] &&
		hwc->last_cpu == smp_processor_id() &&
		hwc->last_tag == cpuc->tags[i];
}

static void x86_pmu_start(struct perf_event *event, int flags);
static void x86_pmu_stop(struct perf_event *event, int flags);

static void x86_pmu_enable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct perf_event *event;
	struct hw_perf_event *hwc;
	int i, added = cpuc->n_added;

	if (!x86_pmu_initialized())
		return;

	if (cpuc->enabled)
		return;

	if (cpuc->n_added) {
		int n_running = cpuc->n_events - cpuc->n_added;
		/*
		 * apply assignment obtained either from
		 * hw_perf_group_sched_in() or x86_pmu_enable()
		 *
		 * step1: save events moving to new counters
		 * step2: reprogram moved events into new counters
		 */
		for (i = 0; i < n_running; i++) {
			event = cpuc->event_list[i];
			hwc = &event->hw;

			/*
			 * we can avoid reprogramming counter if:
			 * - assigned same counter as last time
			 * - running on same CPU as last time
			 * - no other event has used the counter since
			 */
			if (hwc->idx == -1 ||
			    match_prev_assignment(hwc, cpuc, i))
				continue;

			/*
			 * Ensure we don't accidentally enable a stopped
			 * counter simply because we rescheduled.
			 */
			if (hwc->state & PERF_HES_STOPPED)
				hwc->state |= PERF_HES_ARCH;

			x86_pmu_stop(event, PERF_EF_UPDATE);
		}

		for (i = 0; i < cpuc->n_events; i++) {
			event = cpuc->event_list[i];
			hwc = &event->hw;

			if (!match_prev_assignment(hwc, cpuc, i))
				x86_assign_hw_event(event, cpuc, i);
			else if (i < n_running)
				continue;

			if (hwc->state & PERF_HES_ARCH)
				continue;

			x86_pmu_start(event, PERF_EF_RELOAD);
		}
		cpuc->n_added = 0;
		perf_events_lapic_init();
	}

	cpuc->enabled = 1;
	barrier();

	x86_pmu.enable_all(added);
}

static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
					  u64 enable_mask)
{
	wrmsrl(hwc->config_base + hwc->idx, hwc->config | enable_mask);
}

static inline void x86_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base + hwc->idx, hwc->config);
}

static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);

/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the event disabled in hw:
 */
static int
x86_perf_event_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0, idx = hwc->idx;

	if (idx == X86_PMC_IDX_FIXED_BTS)
		return 0;

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}
	/*
	 * Quirk: certain CPUs don't like it if just 1 hw_event is left:
	 */
	if (unlikely(left < 2))
		left = 2;

	if (left > x86_pmu.max_period)
		left = x86_pmu.max_period;

	per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;

	/*
	 * The hw event starts counting from this event offset,
	 * mark it to be able to extract future deltas:
	 */
	local64_set(&hwc->prev_count, (u64)-left);

	wrmsrl(hwc->event_base + idx, (u64)(-left) & x86_pmu.cntval_mask);

	/*
	 * Due to an erratum on certain CPUs we need
	 * a second write to be sure the register
	 * is updated properly
	 */
	if (x86_pmu.perfctr_second_write) {
		wrmsrl(hwc->event_base + idx,
		       (u64)(-left) & x86_pmu.cntval_mask);
	}

	perf_event_update_userpage(event);

	return ret;
}
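
/*
 * Worked example (illustrative numbers, assuming 48-bit counters): with
 * sample_period = 100000 and period_left = 100000, left = 100000 and the
 * counter is programmed with
 *
 *	(u64)(-100000) & x86_pmu.cntval_mask = 0xfffffffe7960
 *
 * i.e. 2^48 - 100000, so the hardware overflows (and raises a PMI) after
 * exactly 100000 more increments.  prev_count is set to -left as well,
 * so the next x86_perf_event_update() computes the correct delta.
 */
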
static void x86_pmu_enable_event(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (cpuc->enabled)
		__x86_pmu_enable_event(&event->hw,
				       ARCH_PERFMON_EVENTSEL_ENABLE);
}

/*
 * Add a single event to the PMU.
 *
 * The event is added to the group of enabled events
 * but only if it can be scheduled with existing events.
 */
static int x86_pmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc;
	int assign[X86_PMC_IDX_MAX];
	int n, n0, ret;

	hwc = &event->hw;

	perf_pmu_disable(event->pmu);
	n0 = cpuc->n_events;
	ret = n = collect_events(cpuc, event, false);
	if (ret < 0)
		goto out;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_ARCH;

	/*
	 * If a group events scheduling transaction was started,
	 * skip the schedulability test here; it will be performed
	 * at commit time (->commit_txn) as a whole
	 */
	if (cpuc->group_flag & PERF_EVENT_TXN)
		goto done_collect;

	ret = x86_pmu.schedule_events(cpuc, n, assign);
	if (ret)
		goto out;
	/*
	 * copy new assignment, now we know it is possible;
	 * it will be used by hw_perf_enable()
	 */
	memcpy(cpuc->assign, assign, n*sizeof(int));

done_collect:
	cpuc->n_events = n;
	cpuc->n_added += n - n0;
	cpuc->n_txn += n - n0;

	ret = 0;
out:
	perf_pmu_enable(event->pmu);
	return ret;
}

static void x86_pmu_start(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx = event->hw.idx;

	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	if (WARN_ON_ONCE(idx == -1))
		return;

	if (flags & PERF_EF_RELOAD) {
		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
		x86_perf_event_set_period(event);
	}

	event->hw.state = 0;

	cpuc->events[idx] = event;
	__set_bit(idx, cpuc->active_mask);
	__set_bit(idx, cpuc->running);
	x86_pmu.enable(event);
	perf_event_update_userpage(event);
}

void perf_event_print_debug(void)
{
	u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
	u64 pebs;
	struct cpu_hw_events *cpuc;
	unsigned long flags;
	int cpu, idx;

	if (!x86_pmu.num_counters)
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();
	cpuc = &per_cpu(cpu_hw_events, cpu);

	if (x86_pmu.version >= 2) {
		rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
		rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
		rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
		rdmsrl(MSR_IA32_PEBS_ENABLE, pebs);

		pr_info("\n");
		pr_info("CPU#%d: ctrl: %016llx\n", cpu, ctrl);
		pr_info("CPU#%d: status: %016llx\n", cpu, status);
		pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow);
		pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed);
		pr_info("CPU#%d: pebs: %016llx\n", cpu, pebs);
	}
	pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask);

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
		rdmsrl(x86_pmu.perfctr + idx, pmc_count);

		prev_left = per_cpu(pmc_prev_left[idx], cpu);

		pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n",
			cpu, idx, pmc_ctrl);
		pr_info("CPU#%d: gen-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
		pr_info("CPU#%d: gen-PMC%d left: %016llx\n",
			cpu, idx, prev_left);
	}
	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);

		pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
	}
	local_irq_restore(flags);
}

static void x86_pmu_stop(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
		x86_pmu.disable(event);
		cpuc->events[hwc->idx] = NULL;
		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of an event
		 * that we are disabling:
		 */
		x86_perf_event_update(event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

static void x86_pmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int i;

	/*
	 * If we're called during a txn, we don't need to do anything.
	 * The events never got scheduled and ->cancel_txn will truncate
	 * the event_list.
	 */
	if (cpuc->group_flag & PERF_EVENT_TXN)
		return;

	x86_pmu_stop(event, PERF_EF_UPDATE);

	for (i = 0; i < cpuc->n_events; i++) {
		if (event == cpuc->event_list[i]) {

			if (x86_pmu.put_event_constraints)
				x86_pmu.put_event_constraints(cpuc, event);

			while (++i < cpuc->n_events)
				cpuc->event_list[i-1] = cpuc->event_list[i];

			--cpuc->n_events;
			break;
		}
	}
	perf_event_update_userpage(event);
}

static int x86_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct perf_event *event;
	int idx, handled = 0;
	u64 val;

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		if (!test_bit(idx, cpuc->active_mask)) {
			/*
			 * Though we deactivated the counter, some CPUs
			 * might still deliver spurious interrupts that
			 * are in flight. Catch them:
			 */
			if (__test_and_clear_bit(idx, cpuc->running))
				handled++;
			continue;
		}

		event = cpuc->events[idx];

		val = x86_perf_event_update(event);
		if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
			continue;

		/*
		 * event overflow
		 */
		handled++;
		data.period = event->hw.last_period;

		if (!x86_perf_event_set_period(event))
			continue;

		if (perf_event_overflow(event, 1, &data, regs))
			x86_pmu_stop(event, 0);
	}

	if (handled)
		inc_irq_stat(apic_perf_irqs);

	return handled;
}
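
/*
 * Note on the overflow test above (clarifying comment, not in the
 * original file): counters are programmed with -left, so while a counter
 * is still counting towards zero its value keeps bit (cntval_bits - 1)
 * set.  A set top bit therefore means "no overflow yet" and the counter
 * is skipped; a cleared top bit means the counter wrapped and a sample
 * must be taken.
 */
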
void perf_events_lapic_init(void)
{
	if (!x86_pmu.apic || !x86_pmu_initialized())
		return;

	/*
	 * Always use NMI for PMU
	 */
	apic_write(APIC_LVTPC, APIC_DM_NMI);
}

struct pmu_nmi_state {
	unsigned int marked;
	int handled;
};

static DEFINE_PER_CPU(struct pmu_nmi_state, pmu_nmi);

static int __kprobes
perf_event_nmi_handler(struct notifier_block *self,
		       unsigned long cmd, void *__args)
{
	struct die_args *args = __args;
	unsigned int this_nmi;
	int handled;

	if (!atomic_read(&active_events))
		return NOTIFY_DONE;

	switch (cmd) {
	case DIE_NMI:
	case DIE_NMI_IPI:
		break;
	case DIE_NMIUNKNOWN:
		this_nmi = percpu_read(irq_stat.__nmi_count);
		if (this_nmi != __get_cpu_var(pmu_nmi).marked)
			/* let the kernel handle the unknown nmi */
			return NOTIFY_DONE;
		/*
		 * This one is a PMU back-to-back nmi. Two events
		 * trigger 'simultaneously' raising two back-to-back
		 * NMIs. If the first NMI handles both, the latter
		 * will be empty and daze the CPU. So, we drop it to
		 * avoid false-positive 'unknown nmi' messages.
		 */
		return NOTIFY_STOP;
	default:
		return NOTIFY_DONE;
	}

	apic_write(APIC_LVTPC, APIC_DM_NMI);

	handled = x86_pmu.handle_irq(args->regs);
	if (!handled)
		return NOTIFY_DONE;

	this_nmi = percpu_read(irq_stat.__nmi_count);
	if ((handled > 1) ||
	    /* the next nmi could be a back-to-back nmi */
	    ((__get_cpu_var(pmu_nmi).marked == this_nmi) &&
	     (__get_cpu_var(pmu_nmi).handled > 1))) {
		/*
		 * We could have two subsequent back-to-back nmis: The
		 * first handles more than one counter, the 2nd
		 * handles only one counter and the 3rd handles no
		 * counter.
		 *
		 * This is the 2nd nmi because the previous was
		 * handling more than one counter. We will mark the
		 * next (3rd) and then drop it if unhandled.
		 */
		__get_cpu_var(pmu_nmi).marked = this_nmi + 1;
		__get_cpu_var(pmu_nmi).handled = handled;
	}

	return NOTIFY_STOP;
}
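
/*
 * Illustrative sequence (hypothetical NMI numbers): if NMI #10 finds two
 * overflowed counters, handled == 2, so marked is set to 11 and handled
 * to 2.  When the back-to-back NMI #11 arrives with nothing left to do,
 * it is eventually reported here as DIE_NMIUNKNOWN; the check
 * this_nmi == marked recognizes it as the expected follow-up and returns
 * NOTIFY_STOP instead of letting the kernel print an "unknown NMI"
 * warning.
 */
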
static __read_mostly struct notifier_block perf_event_nmi_notifier = {
	.notifier_call = perf_event_nmi_handler,
	.next = NULL,
	.priority = 1
};

static struct event_constraint unconstrained;
static struct event_constraint emptyconstraint;

static struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct event_constraint *c;

	if (x86_pmu.event_constraints) {
		for_each_event_constraint(c, x86_pmu.event_constraints) {
			if ((event->hw.config & c->cmask) == c->code)
				return c;
		}
	}

	return &unconstrained;
}

#include "perf_event_amd.c"
#include "perf_event_p6.c"
#include "perf_event_p4.c"
#include "perf_event_intel_lbr.c"
#include "perf_event_intel_ds.c"
#include "perf_event_intel.c"

static int __cpuinit
x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;
	int ret = NOTIFY_OK;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		if (x86_pmu.cpu_prepare)
			ret = x86_pmu.cpu_prepare(cpu);
		break;

	case CPU_STARTING:
		if (x86_pmu.cpu_starting)
			x86_pmu.cpu_starting(cpu);
		break;

	case CPU_DYING:
		if (x86_pmu.cpu_dying)
			x86_pmu.cpu_dying(cpu);
		break;

	case CPU_UP_CANCELED:
	case CPU_DEAD:
		if (x86_pmu.cpu_dead)
			x86_pmu.cpu_dead(cpu);
		break;

	default:
		break;
	}

	return ret;
}

static void __init pmu_check_apic(void)
{
	if (cpu_has_apic)
		return;

	x86_pmu.apic = 0;
	pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
	pr_info("no hardware sampling interrupt available.\n");
}

void __init init_hw_perf_events(void)
{
	struct event_constraint *c;
	int err;

	pr_info("Performance Events: ");

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		err = intel_pmu_init();
		break;
	case X86_VENDOR_AMD:
		err = amd_pmu_init();
		break;
	default:
		return;
	}
	if (err != 0) {
		pr_cont("no PMU driver, software events only.\n");
		return;
	}

	pmu_check_apic();

	/* sanity check that the hardware exists or is emulated */
	if (!check_hw_exists()) {
		pr_cont("Broken PMU hardware detected, software events only.\n");
		return;
	}

	pr_cont("%s PMU driver.\n", x86_pmu.name);

	if (x86_pmu.quirks)
		x86_pmu.quirks();

	if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
		     x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
		x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
	}
	x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;

	if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
		     x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
		x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
	}

	x86_pmu.intel_ctrl |=
		((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;

	perf_events_lapic_init();
	register_die_notifier(&perf_event_nmi_notifier);

	unconstrained = (struct event_constraint)
		__EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
				   0, x86_pmu.num_counters);

	if (x86_pmu.event_constraints) {
		for_each_event_constraint(c, x86_pmu.event_constraints) {
			if (c->cmask != X86_RAW_EVENT_MASK)
				continue;

			c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
			c->weight += x86_pmu.num_counters;
		}
	}

	pr_info("... version: %d\n", x86_pmu.version);
	pr_info("... bit width: %d\n", x86_pmu.cntval_bits);
	pr_info("... generic registers: %d\n", x86_pmu.num_counters);
	pr_info("... value mask: %016Lx\n", x86_pmu.cntval_mask);
	pr_info("... max period: %016Lx\n", x86_pmu.max_period);
	pr_info("... fixed-purpose events: %d\n", x86_pmu.num_counters_fixed);
	pr_info("... event mask: %016Lx\n", x86_pmu.intel_ctrl);

	perf_pmu_register(&pmu);
	perf_cpu_notifier(x86_pmu_notifier);
}

static inline void x86_pmu_read(struct perf_event *event)
{
	x86_perf_event_update(event);
}

/*
 * Start group events scheduling transaction
 * Set the flag to make pmu::enable() not perform the
 * schedulability test, it will be performed at commit time
 */
static void x86_pmu_start_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	perf_pmu_disable(pmu);
	cpuc->group_flag |= PERF_EVENT_TXN;
	cpuc->n_txn = 0;
}

/*
 * Stop group events scheduling transaction
 * Clear the flag and pmu::enable() will perform the
 * schedulability test.
 */
static void x86_pmu_cancel_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	cpuc->group_flag &= ~PERF_EVENT_TXN;
	/*
	 * Truncate the collected events.
	 */
	cpuc->n_added -= cpuc->n_txn;
	cpuc->n_events -= cpuc->n_txn;
	perf_pmu_enable(pmu);
}

/*
 * Commit group events scheduling transaction
 * Perform the group schedulability test as a whole
 * Return 0 if success
 */
static int x86_pmu_commit_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int assign[X86_PMC_IDX_MAX];
	int n, ret;

	n = cpuc->n_events;

	if (!x86_pmu_initialized())
		return -EAGAIN;

	ret = x86_pmu.schedule_events(cpuc, n, assign);
	if (ret)
		return ret;

	/*
	 * copy new assignment, now we know it is possible;
	 * it will be used by hw_perf_enable()
	 */
	memcpy(cpuc->assign, assign, n*sizeof(int));

	cpuc->group_flag &= ~PERF_EVENT_TXN;
	perf_pmu_enable(pmu);
	return 0;
}
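
/*
 * Illustrative transaction flow (driven by the generic perf core, shown
 * here only as a sketch of how the three callbacks above are used):
 *
 *	pmu->start_txn(pmu);
 *	pmu->add(event, flags);	/* once per group member; the
 *				 * schedulability test is skipped because
 *				 * PERF_EVENT_TXN is set */
 *	pmu->commit_txn(pmu);	/* one schedule_events() pass for the
 *				 * whole group; on failure the core calls
 *				 * cancel_txn(), which rolls back
 *				 * n_events/n_added by n_txn */
 */
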
/*
 * validate that we can schedule this event
 */
static int validate_event(struct perf_event *event)
{
	struct cpu_hw_events *fake_cpuc;
	struct event_constraint *c;
	int ret = 0;

	fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
	if (!fake_cpuc)
		return -ENOMEM;

	c = x86_pmu.get_event_constraints(fake_cpuc, event);

	if (!c || !c->weight)
		ret = -ENOSPC;

	if (x86_pmu.put_event_constraints)
		x86_pmu.put_event_constraints(fake_cpuc, event);

	kfree(fake_cpuc);

	return ret;
}

/*
 * validate a single event group
 *
 * validation includes:
 *	- check events are compatible with each other
 *	- events do not compete for the same counter
 *	- number of events <= number of counters
 *
 * validation ensures the group can be loaded onto the
 * PMU if it was the only group available.
 */
static int validate_group(struct perf_event *event)
{
	struct perf_event *leader = event->group_leader;
	struct cpu_hw_events *fake_cpuc;
	int ret, n;

	ret = -ENOMEM;
	fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
	if (!fake_cpuc)
		goto out;

	/*
	 * the event is not yet connected with its
	 * siblings, therefore we must first collect
	 * existing siblings, then add the new event
	 * before we can simulate the scheduling
	 */
	ret = -ENOSPC;
	n = collect_events(fake_cpuc, leader, true);
	if (n < 0)
		goto out_free;

	fake_cpuc->n_events = n;
	n = collect_events(fake_cpuc, event, false);
	if (n < 0)
		goto out_free;

	fake_cpuc->n_events = n;

	ret = x86_pmu.schedule_events(fake_cpuc, n, NULL);

out_free:
	kfree(fake_cpuc);
out:
	return ret;
}

int x86_pmu_event_init(struct perf_event *event)
{
	struct pmu *tmp;
	int err;

	switch (event->attr.type) {
	case PERF_TYPE_RAW:
	case PERF_TYPE_HARDWARE:
	case PERF_TYPE_HW_CACHE:
		break;

	default:
		return -ENOENT;
	}

	err = __x86_pmu_event_init(event);
	if (!err) {
		/*
		 * we temporarily connect event to its pmu
		 * such that validate_group() can classify
		 * it as an x86 event using is_x86_event()
		 */
		tmp = event->pmu;
		event->pmu = &pmu;

		if (event->group_leader != event)
			err = validate_group(event);
		else
			err = validate_event(event);

		event->pmu = tmp;
	}
	if (err) {
		if (event->destroy)
			event->destroy(event);
	}

	return err;
}

static struct pmu pmu = {
	.pmu_enable	= x86_pmu_enable,
	.pmu_disable	= x86_pmu_disable,

	.event_init	= x86_pmu_event_init,

	.add		= x86_pmu_add,
	.del		= x86_pmu_del,
	.start		= x86_pmu_start,
	.stop		= x86_pmu_stop,
	.read		= x86_pmu_read,

	.start_txn	= x86_pmu_start_txn,
	.cancel_txn	= x86_pmu_cancel_txn,
	.commit_txn	= x86_pmu_commit_txn,
};

/*
 * callchain support
 */

static void
backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
	/* Ignore warnings */
}

static void backtrace_warning(void *data, char *msg)
{
	/* Ignore warnings */
}

static int backtrace_stack(void *data, char *name)
{
	return 0;
}

static void backtrace_address(void *data, unsigned long addr, int reliable)
{
	struct perf_callchain_entry *entry = data;

	perf_callchain_store(entry, addr);
}

static const struct stacktrace_ops backtrace_ops = {
	.warning	= backtrace_warning,
	.warning_symbol	= backtrace_warning_symbol,
	.stack		= backtrace_stack,
	.address	= backtrace_address,
	.walk_stack	= print_context_stack_bp,
};

void
perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		/* TODO: We don't support guest os callchain now */
		return;
	}

	perf_callchain_store(entry, regs->ip);

	dump_trace(NULL, regs, NULL, &backtrace_ops, entry);
}

#ifdef CONFIG_COMPAT
static inline int
perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	/* 32-bit process in 64-bit kernel. */
	struct stack_frame_ia32 frame;
	const void __user *fp;

	if (!test_thread_flag(TIF_IA32))
		return 0;

	fp = compat_ptr(regs->bp);
	while (entry->nr < PERF_MAX_STACK_DEPTH) {
		unsigned long bytes;
		frame.next_frame = 0;
		frame.return_address = 0;

		bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
		if (bytes != sizeof(frame))
			break;

		if (fp < compat_ptr(regs->sp))
			break;

		perf_callchain_store(entry, frame.return_address);
		fp = compat_ptr(frame.next_frame);
	}
	return 1;
}
#else
static inline int
perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	return 0;
}
#endif

void
perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
	struct stack_frame frame;
	const void __user *fp;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		/* TODO: We don't support guest os callchain now */
		return;
	}

	fp = (void __user *)regs->bp;

	perf_callchain_store(entry, regs->ip);

	if (perf_callchain_user32(regs, entry))
		return;

	while (entry->nr < PERF_MAX_STACK_DEPTH) {
		unsigned long bytes;
		frame.next_frame = NULL;
		frame.return_address = 0;

		bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
		if (bytes != sizeof(frame))
			break;

		if ((unsigned long)fp < regs->sp)
			break;

		perf_callchain_store(entry, frame.return_address);
		fp = frame.next_frame;
	}
}

unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
	unsigned long ip;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
		ip = perf_guest_cbs->get_guest_ip();
	else
		ip = instruction_pointer(regs);

	return ip;
}

unsigned long perf_misc_flags(struct pt_regs *regs)
{
	int misc = 0;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		if (perf_guest_cbs->is_user_mode())
			misc |= PERF_RECORD_MISC_GUEST_USER;
		else
			misc |= PERF_RECORD_MISC_GUEST_KERNEL;
	} else {
		if (user_mode(regs))
			misc |= PERF_RECORD_MISC_USER;
		else
			misc |= PERF_RECORD_MISC_KERNEL;
	}

	if (regs->flags & PERF_EFLAGS_EXACT)
		misc |= PERF_RECORD_MISC_EXACT_IP;

	return misc;
}