perf_event.c

/*
 * Performance events x86 architecture code
 *
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2009 Jaswinder Singh Rajput
 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 * Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 * For licencing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/cpu.h>
#include <linux/bitops.h>

#include <asm/apic.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>
#include <asm/compat.h>

#if 0
#undef wrmsrl
#define wrmsrl(msr, val) \
do { \
	trace_printk("wrmsrl(%lx, %lx)\n", (unsigned long)(msr),\
		     (unsigned long)(val)); \
	native_write_msr((msr), (u32)((u64)(val)), \
			 (u32)((u64)(val) >> 32)); \
} while (0)
#endif

/*
 * best effort, GUP based copy_from_user() that assumes IRQ or NMI context
 */
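/*
 * Returns the number of bytes actually copied; a page that is not resident
 * cannot be faulted in from this context, so the copy may stop short and
 * callers must check the return value.
 */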
static unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
{
	unsigned long offset, addr = (unsigned long)from;
	unsigned long size, len = 0;
	struct page *page;
	void *map;
	int ret;

	do {
		ret = __get_user_pages_fast(addr, 1, 0, &page);
		if (!ret)
			break;

		offset = addr & (PAGE_SIZE - 1);
		size = min(PAGE_SIZE - offset, n - len);

		map = kmap_atomic(page);
		memcpy(to, map+offset, size);
		kunmap_atomic(map);
		put_page(page);

		len  += size;
		to   += size;
		addr += size;

	} while (len < n);

	return len;
}

struct event_constraint {
	union {
		unsigned long	idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
		u64		idxmsk64;
	};
	u64	code;
	u64	cmask;
	int	weight;
};

struct amd_nb {
	int nb_id;  /* NorthBridge id */
	int refcnt; /* reference count */
	struct perf_event *owners[X86_PMC_IDX_MAX];
	struct event_constraint event_constraints[X86_PMC_IDX_MAX];
};

#define MAX_LBR_ENTRIES		16

struct cpu_hw_events {
	/*
	 * Generic x86 PMC bits
	 */
	struct perf_event	*events[X86_PMC_IDX_MAX]; /* in counter order */
	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	int			enabled;

	int			n_events;
	int			n_added;
	int			n_txn;
	int			assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
	u64			tags[X86_PMC_IDX_MAX];
	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */

	unsigned int		group_flag;

	/*
	 * Intel DebugStore bits
	 */
	struct debug_store	*ds;
	u64			pebs_enabled;

	/*
	 * Intel LBR bits
	 */
	int				lbr_users;
	void				*lbr_context;
	struct perf_branch_stack	lbr_stack;
	struct perf_branch_entry	lbr_entries[MAX_LBR_ENTRIES];

	/*
	 * AMD specific bits
	 */
	struct amd_nb		*amd_nb;
};

#define __EVENT_CONSTRAINT(c, n, m, w) {\
	{ .idxmsk64 = (n) },		\
	.code = (c),			\
	.cmask = (m),			\
	.weight = (w),			\
}

#define EVENT_CONSTRAINT(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n))

/*
 * Constraint on the Event code.
 */
#define INTEL_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)
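
/*
 * For instance, INTEL_EVENT_CONSTRAINT(0xc0, 0x3) (illustrative values, not
 * taken from a real event table) would constrain event code 0xc0 to generic
 * counters 0 and 1: the counter mask becomes idxmsk64 and its population
 * count becomes the constraint weight.
 */
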
/*
 * Constraint on the Event code + UMask + fixed-mask
 *
 * filter mask to validate fixed counter events.
 * the following filters disqualify for fixed counters:
 *  - inv
 *  - edge
 *  - cnt-mask
 * The other filters are supported by fixed counters.
 * The any-thread option is supported starting with v3.
 */
#define FIXED_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, (1ULL << (32+n)), X86_RAW_EVENT_MASK)

/*
 * Constraint on the Event code + UMask
 */
#define PEBS_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)

#define EVENT_CONSTRAINT_END		\
	EVENT_CONSTRAINT(0, 0, 0)

#define for_each_event_constraint(e, c)	\
	for ((e) = (c); (e)->weight; (e)++)

union perf_capabilities {
	struct {
		u64	lbr_format	: 6;
		u64	pebs_trap	: 1;
		u64	pebs_arch_reg	: 1;
		u64	pebs_format	: 4;
		u64	smm_freeze	: 1;
	};
	u64	capabilities;
};

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
	/*
	 * Generic x86 PMC bits
	 */
	const char	*name;
	int		version;
	int		(*handle_irq)(struct pt_regs *);
	void		(*disable_all)(void);
	void		(*enable_all)(int added);
	void		(*enable)(struct perf_event *);
	void		(*disable)(struct perf_event *);
	int		(*hw_config)(struct perf_event *event);
	int		(*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
	unsigned	eventsel;
	unsigned	perfctr;
	u64		(*event_map)(int);
	int		max_events;
	int		num_counters;
	int		num_counters_fixed;
	int		cntval_bits;
	u64		cntval_mask;
	int		apic;
	u64		max_period;
	struct event_constraint *
			(*get_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);

	void		(*put_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);
	struct event_constraint *event_constraints;
	void		(*quirks)(void);
	int		perfctr_second_write;

	int		(*cpu_prepare)(int cpu);
	void		(*cpu_starting)(int cpu);
	void		(*cpu_dying)(int cpu);
	void		(*cpu_dead)(int cpu);

	/*
	 * Intel Arch Perfmon v2+
	 */
	u64			intel_ctrl;
	union perf_capabilities intel_cap;

	/*
	 * Intel DebugStore bits
	 */
	int		bts, pebs;
	int		bts_active, pebs_active;
	int		pebs_record_size;
	void		(*drain_pebs)(struct pt_regs *regs);
	struct event_constraint *pebs_constraints;

	/*
	 * Intel LBR
	 */
	unsigned long	lbr_tos, lbr_from, lbr_to; /* MSR base regs */
	int		lbr_nr;			   /* hardware stack size */
};

static struct x86_pmu x86_pmu __read_mostly;

static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
	.enabled = 1,
};

static int x86_perf_event_set_period(struct perf_event *event);

/*
 * Generalized hw caching related hw_event table, filled
 * in on a per model basis. A value of 0 means
 * 'not supported', -1 means 'hw_event makes no sense on
 * this CPU', any other value means the raw hw_event
 * ID.
 */

#define C(x) PERF_COUNT_HW_CACHE_##x

static u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

/*
 * Propagate event elapsed time into the generic event.
 * Can only be executed on the CPU where the event is active.
 * Returns the delta events processed.
 */
static u64
x86_perf_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int shift = 64 - x86_pmu.cntval_bits;
	u64 prev_raw_count, new_raw_count;
	int idx = hwc->idx;
	s64 delta;

	if (idx == X86_PMC_IDX_FIXED_BTS)
		return 0;

	/*
	 * Careful: an NMI might modify the previous event value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic event atomically:
	 */
again:
	prev_raw_count = local64_read(&hwc->prev_count);
	rdmsrl(hwc->event_base + idx, new_raw_count);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (event-)time and add that to the generic event.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
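
	/*
	 * Worked example (a sketch, assuming a 48-bit wide counter):
	 * cntval_bits is 48, so shift is 16. Shifting both raw values up by
	 * 16 and arithmetic-shifting the difference back down sign-extends
	 * the delta, so it comes out correct modulo 2^48 whether or not the
	 * hardware sign-extends above the physical counter width.
	 */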
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

static atomic_t active_events;
static DEFINE_MUTEX(pmc_reserve_mutex);

#ifdef CONFIG_X86_LOCAL_APIC

static bool reserve_pmc_hardware(void)
{
	int i;

	if (nmi_watchdog == NMI_LOCAL_APIC)
		disable_lapic_nmi_watchdog();

	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
			goto perfctr_fail;
	}

	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
			goto eventsel_fail;
	}

	return true;

eventsel_fail:
	for (i--; i >= 0; i--)
		release_evntsel_nmi(x86_pmu.eventsel + i);

	i = x86_pmu.num_counters;

perfctr_fail:
	for (i--; i >= 0; i--)
		release_perfctr_nmi(x86_pmu.perfctr + i);

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();

	return false;
}

static void release_pmc_hardware(void)
{
	int i;

	for (i = 0; i < x86_pmu.num_counters; i++) {
		release_perfctr_nmi(x86_pmu.perfctr + i);
		release_evntsel_nmi(x86_pmu.eventsel + i);
	}

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();
}

#else

static bool reserve_pmc_hardware(void) { return true; }
static void release_pmc_hardware(void) {}

#endif
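
/*
 * Sanity check: write a test value into the first counter MSR and read it
 * back. If the write faults or the value does not stick (for instance under
 * a hypervisor that does not implement the counter MSRs), treat the PMU as
 * broken or absent.
 */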
static bool check_hw_exists(void)
{
	u64 val, val_new = 0;
	int ret = 0;

	val = 0xabcdUL;
	ret |= checking_wrmsrl(x86_pmu.perfctr, val);
	ret |= rdmsrl_safe(x86_pmu.perfctr, &val_new);
	if (ret || val != val_new)
		return false;

	return true;
}

static void reserve_ds_buffers(void);
static void release_ds_buffers(void);

static void hw_perf_event_destroy(struct perf_event *event)
{
	if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
		release_pmc_hardware();
		release_ds_buffers();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

static inline int x86_pmu_initialized(void)
{
	return x86_pmu.handle_irq != NULL;
}
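
/*
 * For PERF_TYPE_HW_CACHE events, attr->config packs the cache descriptor
 * into three byte-wide fields: cache id in bits 0-7, operation in bits 8-15
 * and result (access vs. miss) in bits 16-23. set_ext_hw_attr() unpacks
 * these and looks up the model-specific raw event in hw_cache_event_ids[].
 */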
static inline int
set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
{
	unsigned int cache_type, cache_op, cache_result;
	u64 config, val;

	config = attr->config;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	val = hw_cache_event_ids[cache_type][cache_op][cache_result];

	if (val == 0)
		return -ENOENT;

	if (val == -1)
		return -EINVAL;

	hwc->config |= val;

	return 0;
}

static int x86_setup_perfctr(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	u64 config;

	if (!hwc->sample_period) {
		hwc->sample_period = x86_pmu.max_period;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	} else {
		/*
		 * If we have a PMU initialized but no APIC
		 * interrupts, we cannot sample hardware
		 * events (user-space has to fall back and
		 * sample via a hrtimer based software event):
		 */
		if (!x86_pmu.apic)
			return -EOPNOTSUPP;
	}

	if (attr->type == PERF_TYPE_RAW)
		return 0;

	if (attr->type == PERF_TYPE_HW_CACHE)
		return set_ext_hw_attr(hwc, attr);

	if (attr->config >= x86_pmu.max_events)
		return -EINVAL;

	/*
	 * The generic map:
	 */
	config = x86_pmu.event_map(attr->config);

	if (config == 0)
		return -ENOENT;

	if (config == -1LL)
		return -EINVAL;

	/*
	 * Branch tracing:
	 */
	if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
	    (hwc->sample_period == 1)) {
		/* BTS is not supported by this architecture. */
		if (!x86_pmu.bts_active)
			return -EOPNOTSUPP;

		/* BTS is currently only allowed for user-mode. */
		if (!attr->exclude_kernel)
			return -EOPNOTSUPP;
	}

	hwc->config |= config;

	return 0;
}

static int x86_pmu_hw_config(struct perf_event *event)
{
	if (event->attr.precise_ip) {
		int precise = 0;

		/* Support for constant skid */
		if (x86_pmu.pebs_active) {
			precise++;

			/* Support for IP fixup */
			if (x86_pmu.lbr_nr)
				precise++;
		}

		if (event->attr.precise_ip > precise)
			return -EOPNOTSUPP;
	}
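
	/*
	 * In other words: precise_ip == 1 needs PEBS, and precise_ip == 2
	 * additionally needs LBR for the off-by-one IP fixup; anything
	 * beyond what the hardware offers was rejected above.
	 */
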
	/*
	 * Generate PMC IRQs:
	 * (keep 'enabled' bit clear for now)
	 */
	event->hw.config = ARCH_PERFMON_EVENTSEL_INT;

	/*
	 * Count user and OS events unless requested not to
	 */
	if (!event->attr.exclude_user)
		event->hw.config |= ARCH_PERFMON_EVENTSEL_USR;
	if (!event->attr.exclude_kernel)
		event->hw.config |= ARCH_PERFMON_EVENTSEL_OS;

	if (event->attr.type == PERF_TYPE_RAW)
		event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;

	return x86_setup_perfctr(event);
}

/*
 * Setup the hardware configuration for a given attr_type
 */
static int __x86_pmu_event_init(struct perf_event *event)
{
	int err;

	if (!x86_pmu_initialized())
		return -ENODEV;

	err = 0;
	if (!atomic_inc_not_zero(&active_events)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&active_events) == 0) {
			if (!reserve_pmc_hardware())
				err = -EBUSY;
			else
				reserve_ds_buffers();
		}
		if (!err)
			atomic_inc(&active_events);
		mutex_unlock(&pmc_reserve_mutex);
	}
	if (err)
		return err;

	event->destroy = hw_perf_event_destroy;

	event->hw.idx = -1;
	event->hw.last_cpu = -1;
	event->hw.last_tag = ~0ULL;

	return x86_pmu.hw_config(event);
}

static void x86_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;
		rdmsrl(x86_pmu.eventsel + idx, val);
		if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
			continue;
		val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
		wrmsrl(x86_pmu.eventsel + idx, val);
	}
}

static void x86_pmu_disable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!x86_pmu_initialized())
		return;

	if (!cpuc->enabled)
		return;

	cpuc->n_added = 0;
	cpuc->enabled = 0;
	barrier();

	x86_pmu.disable_all();
}

static void x86_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		struct perf_event *event = cpuc->events[idx];
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		val = event->hw.config;
		val |= ARCH_PERFMON_EVENTSEL_ENABLE;
		wrmsrl(x86_pmu.eventsel + idx, val);
	}
}

static struct pmu pmu;

static inline int is_x86_event(struct perf_event *event)
{
	return event->pmu == &pmu;
}

static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
{
	struct event_constraint *c, *constraints[X86_PMC_IDX_MAX];
	unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	int i, j, w, wmax, num = 0;
	struct hw_perf_event *hwc;

	bitmap_zero(used_mask, X86_PMC_IDX_MAX);

	for (i = 0; i < n; i++) {
		c = x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
		constraints[i] = c;
	}

	/*
	 * fastpath, try to reuse previous register
	 */
	for (i = 0; i < n; i++) {
		hwc = &cpuc->event_list[i]->hw;
		c = constraints[i];

		/* never assigned */
		if (hwc->idx == -1)
			break;

		/* constraint still honored */
		if (!test_bit(hwc->idx, c->idxmsk))
			break;

		/* not already used */
		if (test_bit(hwc->idx, used_mask))
			break;

		__set_bit(hwc->idx, used_mask);
		if (assign)
			assign[i] = hwc->idx;
	}
	if (i == n)
		goto done;

	/*
	 * begin slow path
	 */
	bitmap_zero(used_mask, X86_PMC_IDX_MAX);

	/*
	 * weight = number of possible counters
	 *
	 * 1    = most constrained, only works on one counter
	 * wmax = least constrained, works on any counter
	 *
	 * assign events to counters starting with most
	 * constrained events.
	 */
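
	/*
	 * Example (hypothetical constraints): an event restricted to counter
	 * 0 has weight 1 and is placed in the first pass, while an event
	 * that may use any generic counter has weight num_counters and is
	 * placed last, when fewer slots remain.
	 */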
	wmax = x86_pmu.num_counters;

	/*
	 * when fixed event counters are present,
	 * wmax is incremented by 1 to account
	 * for one more choice
	 */
	if (x86_pmu.num_counters_fixed)
		wmax++;

	for (w = 1, num = n; num && w <= wmax; w++) {
		/* for each event */
		for (i = 0; num && i < n; i++) {
			c = constraints[i];
			hwc = &cpuc->event_list[i]->hw;

			if (c->weight != w)
				continue;

			for_each_set_bit(j, c->idxmsk, X86_PMC_IDX_MAX) {
				if (!test_bit(j, used_mask))
					break;
			}

			if (j == X86_PMC_IDX_MAX)
				break;

			__set_bit(j, used_mask);

			if (assign)
				assign[i] = j;
			num--;
		}
	}
done:
	/*
	 * scheduling failed or is just a simulation,
	 * free resources if necessary
	 */
	if (!assign || num) {
		for (i = 0; i < n; i++) {
			if (x86_pmu.put_event_constraints)
				x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
		}
	}
	return num ? -ENOSPC : 0;
}

/*
 * dogrp: true if must collect siblings events (group)
 * returns total number of events and error code
 */
static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
{
	struct perf_event *event;
	int n, max_count;

	max_count = x86_pmu.num_counters + x86_pmu.num_counters_fixed;

	/* current number of events already accepted */
	n = cpuc->n_events;

	if (is_x86_event(leader)) {
		if (n >= max_count)
			return -ENOSPC;
		cpuc->event_list[n] = leader;
		n++;
	}
	if (!dogrp)
		return n;

	list_for_each_entry(event, &leader->sibling_list, group_entry) {
		if (!is_x86_event(event) ||
		    event->state <= PERF_EVENT_STATE_OFF)
			continue;

		if (n >= max_count)
			return -ENOSPC;

		cpuc->event_list[n] = event;
		n++;
	}
	return n;
}

static inline void x86_assign_hw_event(struct perf_event *event,
				       struct cpu_hw_events *cpuc, int i)
{
	struct hw_perf_event *hwc = &event->hw;

	hwc->idx = cpuc->assign[i];
	hwc->last_cpu = smp_processor_id();
	hwc->last_tag = ++cpuc->tags[i];

	if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
		hwc->config_base = 0;
		hwc->event_base	= 0;
	} else if (hwc->idx >= X86_PMC_IDX_FIXED) {
		hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
		/*
		 * We set it so that event_base + idx in wrmsr/rdmsr maps to
		 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
		 */
		hwc->event_base =
			MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
	} else {
		hwc->config_base = x86_pmu.eventsel;
		hwc->event_base  = x86_pmu.perfctr;
	}
}

static inline int match_prev_assignment(struct hw_perf_event *hwc,
					struct cpu_hw_events *cpuc,
					int i)
{
	return hwc->idx == cpuc->assign[i] &&
		hwc->last_cpu == smp_processor_id() &&
		hwc->last_tag == cpuc->tags[i];
}

static void x86_pmu_start(struct perf_event *event, int flags);
static void x86_pmu_stop(struct perf_event *event, int flags);

static void x86_pmu_enable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct perf_event *event;
	struct hw_perf_event *hwc;
	int i, added = cpuc->n_added;

	if (!x86_pmu_initialized())
		return;

	if (cpuc->enabled)
		return;

	if (cpuc->n_added) {
		int n_running = cpuc->n_events - cpuc->n_added;
		/*
		 * apply assignment obtained either from
		 * hw_perf_group_sched_in() or x86_pmu_enable()
		 *
		 * step1: save events moving to new counters
		 * step2: reprogram moved events into new counters
		 */
		for (i = 0; i < n_running; i++) {
			event = cpuc->event_list[i];
			hwc = &event->hw;

			/*
			 * we can avoid reprogramming counter if:
			 * - assigned same counter as last time
			 * - running on same CPU as last time
			 * - no other event has used the counter since
			 */
			if (hwc->idx == -1 ||
			    match_prev_assignment(hwc, cpuc, i))
				continue;

			/*
			 * Ensure we don't accidentally enable a stopped
			 * counter simply because we rescheduled.
			 */
			if (hwc->state & PERF_HES_STOPPED)
				hwc->state |= PERF_HES_ARCH;

			x86_pmu_stop(event, PERF_EF_UPDATE);
		}

		for (i = 0; i < cpuc->n_events; i++) {
			event = cpuc->event_list[i];
			hwc = &event->hw;

			if (!match_prev_assignment(hwc, cpuc, i))
				x86_assign_hw_event(event, cpuc, i);
			else if (i < n_running)
				continue;

			if (hwc->state & PERF_HES_ARCH)
				continue;

			x86_pmu_start(event, PERF_EF_RELOAD);
		}
		cpuc->n_added = 0;
		perf_events_lapic_init();
	}

	cpuc->enabled = 1;
	barrier();

	x86_pmu.enable_all(added);
}

static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
					  u64 enable_mask)
{
	wrmsrl(hwc->config_base + hwc->idx, hwc->config | enable_mask);
}

static inline void x86_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base + hwc->idx, hwc->config);
}

static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);

/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the event disabled in hw:
 */
static int
x86_perf_event_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0, idx = hwc->idx;

	if (idx == X86_PMC_IDX_FIXED_BTS)
		return 0;

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}
	/*
	 * Quirk: certain CPUs don't like it if just 1 hw_event is left:
	 */
	if (unlikely(left < 2))
		left = 2;

	if (left > x86_pmu.max_period)
		left = x86_pmu.max_period;

	per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;

	/*
	 * The hw event starts counting from this event offset,
	 * mark it to be able to extract future deltas:
	 */
	local64_set(&hwc->prev_count, (u64)-left);

	wrmsrl(hwc->event_base + idx, (u64)(-left) & x86_pmu.cntval_mask);
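
	/*
	 * E.g. with a sample period of 100000 (an illustrative value), the
	 * counter is programmed to (u64)-100000 masked to the counter width,
	 * so the PMI fires after 100000 further increments.
	 */
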
	/*
	 * Due to an erratum on certain CPUs we need
	 * a second write to be sure the register
	 * is updated properly
	 */
	if (x86_pmu.perfctr_second_write) {
		wrmsrl(hwc->event_base + idx,
			(u64)(-left) & x86_pmu.cntval_mask);
	}

	perf_event_update_userpage(event);

	return ret;
}

static void x86_pmu_enable_event(struct perf_event *event)
{
	if (__this_cpu_read(cpu_hw_events.enabled))
		__x86_pmu_enable_event(&event->hw,
				       ARCH_PERFMON_EVENTSEL_ENABLE);
}

/*
 * Add a single event to the PMU.
 *
 * The event is added to the group of enabled events
 * but only if it can be scheduled with existing events.
 */
static int x86_pmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc;
	int assign[X86_PMC_IDX_MAX];
	int n, n0, ret;

	hwc = &event->hw;

	perf_pmu_disable(event->pmu);
	n0 = cpuc->n_events;
	ret = n = collect_events(cpuc, event, false);
	if (ret < 0)
		goto out;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_ARCH;

	/*
	 * If group events scheduling transaction was started,
	 * skip the schedulability test here, it will be performed
	 * at commit time (->commit_txn) as a whole
	 */
	if (cpuc->group_flag & PERF_EVENT_TXN)
		goto done_collect;

	ret = x86_pmu.schedule_events(cpuc, n, assign);
	if (ret)
		goto out;
	/*
	 * copy new assignment, now we know it is possible
	 * will be used by hw_perf_enable()
	 */
	memcpy(cpuc->assign, assign, n*sizeof(int));

done_collect:
	cpuc->n_events = n;
	cpuc->n_added += n - n0;
	cpuc->n_txn += n - n0;

	ret = 0;
out:
	perf_pmu_enable(event->pmu);
	return ret;
}

static void x86_pmu_start(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx = event->hw.idx;

	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	if (WARN_ON_ONCE(idx == -1))
		return;

	if (flags & PERF_EF_RELOAD) {
		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
		x86_perf_event_set_period(event);
	}

	event->hw.state = 0;

	cpuc->events[idx] = event;
	__set_bit(idx, cpuc->active_mask);
	__set_bit(idx, cpuc->running);
	x86_pmu.enable(event);
	perf_event_update_userpage(event);
}

void perf_event_print_debug(void)
{
	u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
	u64 pebs;
	struct cpu_hw_events *cpuc;
	unsigned long flags;
	int cpu, idx;

	if (!x86_pmu.num_counters)
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();
	cpuc = &per_cpu(cpu_hw_events, cpu);

	if (x86_pmu.version >= 2) {
		rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
		rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
		rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
		rdmsrl(MSR_IA32_PEBS_ENABLE, pebs);

		pr_info("\n");
		pr_info("CPU#%d: ctrl: %016llx\n", cpu, ctrl);
		pr_info("CPU#%d: status: %016llx\n", cpu, status);
		pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow);
		pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed);
		pr_info("CPU#%d: pebs: %016llx\n", cpu, pebs);
	}
	pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask);

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
		rdmsrl(x86_pmu.perfctr + idx, pmc_count);

		prev_left = per_cpu(pmc_prev_left[idx], cpu);

		pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n",
			cpu, idx, pmc_ctrl);
		pr_info("CPU#%d: gen-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
		pr_info("CPU#%d: gen-PMC%d left: %016llx\n",
			cpu, idx, prev_left);
	}
	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);

		pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
	}
	local_irq_restore(flags);
}

static void x86_pmu_stop(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
		x86_pmu.disable(event);
		cpuc->events[hwc->idx] = NULL;
		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of an event
		 * that we are disabling:
		 */
		x86_perf_event_update(event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

static void x86_pmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int i;

	/*
	 * If we're called during a txn, we don't need to do anything.
	 * The events never got scheduled and ->cancel_txn will truncate
	 * the event_list.
	 */
	if (cpuc->group_flag & PERF_EVENT_TXN)
		return;

	x86_pmu_stop(event, PERF_EF_UPDATE);

	for (i = 0; i < cpuc->n_events; i++) {
		if (event == cpuc->event_list[i]) {

			if (x86_pmu.put_event_constraints)
				x86_pmu.put_event_constraints(cpuc, event);

			while (++i < cpuc->n_events)
				cpuc->event_list[i-1] = cpuc->event_list[i];

			--cpuc->n_events;
			break;
		}
	}
	perf_event_update_userpage(event);
}

static int x86_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct perf_event *event;
	int idx, handled = 0;
	u64 val;

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		if (!test_bit(idx, cpuc->active_mask)) {
			/*
			 * Though we deactivated the counter, some CPUs
			 * might still deliver spurious interrupts that
			 * are already in flight. Catch them:
			 */
			if (__test_and_clear_bit(idx, cpuc->running))
				handled++;
			continue;
		}

		event = cpuc->events[idx];

		val = x86_perf_event_update(event);
		if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
			continue;

		/*
		 * event overflow
		 */
		handled++;
		data.period = event->hw.last_period;

		if (!x86_perf_event_set_period(event))
			continue;

		if (perf_event_overflow(event, 1, &data, regs))
			x86_pmu_stop(event, 0);
	}

	if (handled)
		inc_irq_stat(apic_perf_irqs);

	return handled;
}

void perf_events_lapic_init(void)
{
	if (!x86_pmu.apic || !x86_pmu_initialized())
		return;

	/*
	 * Always use NMI for PMU
	 */
	apic_write(APIC_LVTPC, APIC_DM_NMI);
}

struct pmu_nmi_state {
	unsigned int	marked;
	int		handled;
};

static DEFINE_PER_CPU(struct pmu_nmi_state, pmu_nmi);

static int __kprobes
perf_event_nmi_handler(struct notifier_block *self,
			 unsigned long cmd, void *__args)
{
	struct die_args *args = __args;
	unsigned int this_nmi;
	int handled;

	if (!atomic_read(&active_events))
		return NOTIFY_DONE;

	switch (cmd) {
	case DIE_NMI:
	case DIE_NMI_IPI:
		break;
	case DIE_NMIUNKNOWN:
		this_nmi = percpu_read(irq_stat.__nmi_count);
		if (this_nmi != __this_cpu_read(pmu_nmi.marked))
			/* let the kernel handle the unknown nmi */
			return NOTIFY_DONE;
		/*
		 * This one is a PMU back-to-back nmi. Two events
		 * trigger 'simultaneously' raising two back-to-back
		 * NMIs. If the first NMI handles both, the latter
		 * will be empty and daze the CPU. So, we drop it to
		 * avoid false-positive 'unknown nmi' messages.
		 */
		return NOTIFY_STOP;
	default:
		return NOTIFY_DONE;
	}

	apic_write(APIC_LVTPC, APIC_DM_NMI);

	handled = x86_pmu.handle_irq(args->regs);
	if (!handled)
		return NOTIFY_DONE;

	this_nmi = percpu_read(irq_stat.__nmi_count);
	if ((handled > 1) ||
		/* the next nmi could be a back-to-back nmi */
	    ((__this_cpu_read(pmu_nmi.marked) == this_nmi) &&
	     (__this_cpu_read(pmu_nmi.handled) > 1))) {
		/*
		 * We could have two subsequent back-to-back nmis: The
		 * first handles more than one counter, the 2nd
		 * handles only one counter and the 3rd handles no
		 * counter.
		 *
		 * This is the 2nd nmi because the previous was
		 * handling more than one counter. We will mark the
		 * next (3rd) and then drop it if unhandled.
		 */
		__this_cpu_write(pmu_nmi.marked, this_nmi + 1);
		__this_cpu_write(pmu_nmi.handled, handled);
	}

	return NOTIFY_STOP;
}

static __read_mostly struct notifier_block perf_event_nmi_notifier = {
	.notifier_call	= perf_event_nmi_handler,
	.next		= NULL,
	.priority	= 1
};

static struct event_constraint unconstrained;
static struct event_constraint emptyconstraint;

static struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct event_constraint *c;

	if (x86_pmu.event_constraints) {
		for_each_event_constraint(c, x86_pmu.event_constraints) {
			if ((event->hw.config & c->cmask) == c->code)
				return c;
		}
	}

	return &unconstrained;
}
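
/*
 * The model-specific implementations are included here, rather than built as
 * separate objects, so that they can use the static helpers and per-cpu data
 * defined above.
 */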
#include "perf_event_amd.c"
#include "perf_event_p6.c"
#include "perf_event_p4.c"
#include "perf_event_intel_lbr.c"
#include "perf_event_intel_ds.c"
#include "perf_event_intel.c"
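
/*
 * CPU hotplug callback: dispatch the notification to the optional
 * model-specific hooks (cpu_prepare, cpu_starting, cpu_dying, cpu_dead).
 */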
static int __cpuinit
x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;
	int ret = NOTIFY_OK;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		if (x86_pmu.cpu_prepare)
			ret = x86_pmu.cpu_prepare(cpu);
		break;

	case CPU_STARTING:
		if (x86_pmu.cpu_starting)
			x86_pmu.cpu_starting(cpu);
		break;

	case CPU_DYING:
		if (x86_pmu.cpu_dying)
			x86_pmu.cpu_dying(cpu);
		break;

	case CPU_UP_CANCELED:
	case CPU_DEAD:
		if (x86_pmu.cpu_dead)
			x86_pmu.cpu_dead(cpu);
		break;

	default:
		break;
	}

	return ret;
}

static void __init pmu_check_apic(void)
{
	if (cpu_has_apic)
		return;

	x86_pmu.apic = 0;
	pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
	pr_info("no hardware sampling interrupt available.\n");
}

void __init init_hw_perf_events(void)
{
	struct event_constraint *c;
	int err;

	pr_info("Performance Events: ");

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		err = intel_pmu_init();
		break;
	case X86_VENDOR_AMD:
		err = amd_pmu_init();
		break;
	default:
		return;
	}
	if (err != 0) {
		pr_cont("no PMU driver, software events only.\n");
		return;
	}

	pmu_check_apic();

	/* sanity check that the hardware exists or is emulated */
	if (!check_hw_exists()) {
		pr_cont("Broken PMU hardware detected, software events only.\n");
		return;
	}

	pr_cont("%s PMU driver.\n", x86_pmu.name);

	if (x86_pmu.quirks)
		x86_pmu.quirks();

	if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
		     x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
		x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
	}
	x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;

	if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
		     x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
		x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
	}

	x86_pmu.intel_ctrl |=
		((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
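
	/*
	 * Example (a sketch, assuming 4 generic and 3 fixed counters, and
	 * X86_PMC_IDX_FIXED == 32): intel_ctrl ends up as
	 * 0xf | (0x7ULL << 32) = 0x70000000f, one enable bit per counter.
	 */
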
	perf_events_lapic_init();
	register_die_notifier(&perf_event_nmi_notifier);

	unconstrained = (struct event_constraint)
		__EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
				   0, x86_pmu.num_counters);

	if (x86_pmu.event_constraints) {
		for_each_event_constraint(c, x86_pmu.event_constraints) {
			if (c->cmask != X86_RAW_EVENT_MASK)
				continue;

			c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
			c->weight += x86_pmu.num_counters;
		}
	}

	pr_info("... version: %d\n", x86_pmu.version);
	pr_info("... bit width: %d\n", x86_pmu.cntval_bits);
	pr_info("... generic registers: %d\n", x86_pmu.num_counters);
	pr_info("... value mask: %016Lx\n", x86_pmu.cntval_mask);
	pr_info("... max period: %016Lx\n", x86_pmu.max_period);
	pr_info("... fixed-purpose events: %d\n", x86_pmu.num_counters_fixed);
	pr_info("... event mask: %016Lx\n", x86_pmu.intel_ctrl);

	perf_pmu_register(&pmu);
	perf_cpu_notifier(x86_pmu_notifier);
}

static inline void x86_pmu_read(struct perf_event *event)
{
	x86_perf_event_update(event);
}
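
/*
 * Group scheduling transactions: the perf core brackets the ->add() calls
 * for a group with ->start_txn() and then either ->commit_txn(), which tests
 * schedulability once for the whole group, or ->cancel_txn(), which drops
 * the events collected so far.
 */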
/*
 * Start group events scheduling transaction
 * Set the flag to make pmu::enable() not perform the
 * schedulability test, it will be performed at commit time
 */
static void x86_pmu_start_txn(struct pmu *pmu)
{
	perf_pmu_disable(pmu);
	__this_cpu_or(cpu_hw_events.group_flag, PERF_EVENT_TXN);
	__this_cpu_write(cpu_hw_events.n_txn, 0);
}

/*
 * Stop group events scheduling transaction
 * Clear the flag and pmu::enable() will perform the
 * schedulability test.
 */
static void x86_pmu_cancel_txn(struct pmu *pmu)
{
	__this_cpu_and(cpu_hw_events.group_flag, ~PERF_EVENT_TXN);
	/*
	 * Truncate the collected events.
	 */
	__this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn));
	__this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn));
	perf_pmu_enable(pmu);
}

/*
 * Commit group events scheduling transaction
 * Perform the group schedulability test as a whole
 * Return 0 if success
 */
static int x86_pmu_commit_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int assign[X86_PMC_IDX_MAX];
	int n, ret;

	n = cpuc->n_events;

	if (!x86_pmu_initialized())
		return -EAGAIN;

	ret = x86_pmu.schedule_events(cpuc, n, assign);
	if (ret)
		return ret;

	/*
	 * copy new assignment, now we know it is possible
	 * will be used by hw_perf_enable()
	 */
	memcpy(cpuc->assign, assign, n*sizeof(int));

	cpuc->group_flag &= ~PERF_EVENT_TXN;
	perf_pmu_enable(pmu);
	return 0;
}

/*
 * validate that we can schedule this event
 */
static int validate_event(struct perf_event *event)
{
	struct cpu_hw_events *fake_cpuc;
	struct event_constraint *c;
	int ret = 0;

	fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
	if (!fake_cpuc)
		return -ENOMEM;

	c = x86_pmu.get_event_constraints(fake_cpuc, event);

	if (!c || !c->weight)
		ret = -ENOSPC;

	if (x86_pmu.put_event_constraints)
		x86_pmu.put_event_constraints(fake_cpuc, event);

	kfree(fake_cpuc);

	return ret;
}

/*
 * validate a single event group
 *
 * validation includes:
 *	- check events are compatible with each other
 *	- events do not compete for the same counter
 *	- number of events <= number of counters
 *
 * validation ensures the group can be loaded onto the
 * PMU if it was the only group available.
 */
static int validate_group(struct perf_event *event)
{
	struct perf_event *leader = event->group_leader;
	struct cpu_hw_events *fake_cpuc;
	int ret, n;

	ret = -ENOMEM;
	fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
	if (!fake_cpuc)
		goto out;

	/*
	 * the event is not yet connected with its
	 * siblings therefore we must first collect
	 * existing siblings, then add the new event
	 * before we can simulate the scheduling
	 */
	ret = -ENOSPC;
	n = collect_events(fake_cpuc, leader, true);
	if (n < 0)
		goto out_free;

	fake_cpuc->n_events = n;
	n = collect_events(fake_cpuc, event, false);
	if (n < 0)
		goto out_free;

	fake_cpuc->n_events = n;

	ret = x86_pmu.schedule_events(fake_cpuc, n, NULL);

out_free:
	kfree(fake_cpuc);
out:
	return ret;
}

int x86_pmu_event_init(struct perf_event *event)
{
	struct pmu *tmp;
	int err;

	switch (event->attr.type) {
	case PERF_TYPE_RAW:
	case PERF_TYPE_HARDWARE:
	case PERF_TYPE_HW_CACHE:
		break;

	default:
		return -ENOENT;
	}

	err = __x86_pmu_event_init(event);
	if (!err) {
		/*
		 * we temporarily connect event to its pmu
		 * such that validate_group() can classify
		 * it as an x86 event using is_x86_event()
		 */
		tmp = event->pmu;
		event->pmu = &pmu;

		if (event->group_leader != event)
			err = validate_group(event);
		else
			err = validate_event(event);

		event->pmu = tmp;
	}
	if (err) {
		if (event->destroy)
			event->destroy(event);
	}

	return err;
}

static struct pmu pmu = {
	.pmu_enable	= x86_pmu_enable,
	.pmu_disable	= x86_pmu_disable,

	.event_init	= x86_pmu_event_init,

	.add		= x86_pmu_add,
	.del		= x86_pmu_del,
	.start		= x86_pmu_start,
	.stop		= x86_pmu_stop,
	.read		= x86_pmu_read,

	.start_txn	= x86_pmu_start_txn,
	.cancel_txn	= x86_pmu_cancel_txn,
	.commit_txn	= x86_pmu_commit_txn,
};

/*
 * callchain support
 */

static void
backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
	/* Ignore warnings */
}

static void backtrace_warning(void *data, char *msg)
{
	/* Ignore warnings */
}

static int backtrace_stack(void *data, char *name)
{
	return 0;
}

static void backtrace_address(void *data, unsigned long addr, int reliable)
{
	struct perf_callchain_entry *entry = data;

	perf_callchain_store(entry, addr);
}

static const struct stacktrace_ops backtrace_ops = {
	.warning	= backtrace_warning,
	.warning_symbol	= backtrace_warning_symbol,
	.stack		= backtrace_stack,
	.address	= backtrace_address,
	.walk_stack	= print_context_stack_bp,
};

void
perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		/* TODO: We don't support guest os callchain now */
		return;
	}

	perf_callchain_store(entry, regs->ip);

	dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
}
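
/*
 * The user-space callchain helpers below walk the user stack via saved frame
 * pointers: each stack_frame holds the caller's frame pointer and a return
 * address, fetched with copy_from_user_nmi() since we may be in NMI context.
 */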
#ifdef CONFIG_COMPAT
static inline int
perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	/* 32-bit process in 64-bit kernel. */
	struct stack_frame_ia32 frame;
	const void __user *fp;

	if (!test_thread_flag(TIF_IA32))
		return 0;

	fp = compat_ptr(regs->bp);
	while (entry->nr < PERF_MAX_STACK_DEPTH) {
		unsigned long bytes;
		frame.next_frame = 0;
		frame.return_address = 0;

		bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
		if (bytes != sizeof(frame))
			break;

		if (fp < compat_ptr(regs->sp))
			break;

		perf_callchain_store(entry, frame.return_address);
		fp = compat_ptr(frame.next_frame);
	}
	return 1;
}
#else
static inline int
perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	return 0;
}
#endif

void
perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
	struct stack_frame frame;
	const void __user *fp;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		/* TODO: We don't support guest os callchain now */
		return;
	}

	fp = (void __user *)regs->bp;

	perf_callchain_store(entry, regs->ip);

	if (perf_callchain_user32(regs, entry))
		return;

	while (entry->nr < PERF_MAX_STACK_DEPTH) {
		unsigned long bytes;
		frame.next_frame = NULL;
		frame.return_address = 0;

		bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
		if (bytes != sizeof(frame))
			break;

		if ((unsigned long)fp < regs->sp)
			break;

		perf_callchain_store(entry, frame.return_address);
		fp = frame.next_frame;
	}
}

unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
	unsigned long ip;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
		ip = perf_guest_cbs->get_guest_ip();
	else
		ip = instruction_pointer(regs);

	return ip;
}

unsigned long perf_misc_flags(struct pt_regs *regs)
{
	int misc = 0;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		if (perf_guest_cbs->is_user_mode())
			misc |= PERF_RECORD_MISC_GUEST_USER;
		else
			misc |= PERF_RECORD_MISC_GUEST_KERNEL;
	} else {
		if (user_mode(regs))
			misc |= PERF_RECORD_MISC_USER;
		else
			misc |= PERF_RECORD_MISC_KERNEL;
	}

	if (regs->flags & PERF_EFLAGS_EXACT)
		misc |= PERF_RECORD_MISC_EXACT_IP;

	return misc;
}