/*
 * Performance counter x86 architecture code
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *
 *  For licencing details see kernel-base/COPYING
 */

#include <linux/perf_counter.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>

#include <asm/apic.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>

static bool perf_counters_initialized __read_mostly;

/*
 * Number of (generic) HW counters:
 */
static int nr_counters_generic __read_mostly;
static u64 perf_counter_mask __read_mostly;
static u64 counter_value_mask __read_mostly;
static int counter_value_bits __read_mostly;

static int nr_counters_fixed __read_mostly;

struct cpu_hw_counters {
	struct perf_counter	*counters[X86_PMC_IDX_MAX];
	unsigned long		used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		interrupts;
	u64			throttle_ctrl;
	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	int			enabled;
};

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
	u64		(*save_disable_all)(void);
	void		(*restore_all)(u64);
	u64		(*get_status)(u64);
	void		(*ack_status)(u64);
	void		(*enable)(int, u64);
	void		(*disable)(int, u64);
	unsigned	eventsel;
	unsigned	perfctr;
	u64		(*event_map)(int);
	u64		(*raw_event)(u64);
	int		max_events;
};

static struct x86_pmu *x86_pmu __read_mostly;

static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = {
	.enabled = 1,
};

static __read_mostly int intel_perfmon_version;

/*
 * Intel PerfMon v3. Used on Core2 and later.
 */
static const u64 intel_perfmon_event_map[] =
{
	[PERF_COUNT_CPU_CYCLES]			= 0x003c,
	[PERF_COUNT_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_CACHE_REFERENCES]		= 0x4f2e,
	[PERF_COUNT_CACHE_MISSES]		= 0x412e,
	[PERF_COUNT_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_BRANCH_MISSES]		= 0x00c5,
	[PERF_COUNT_BUS_CYCLES]			= 0x013c,
};

static u64 intel_pmu_event_map(int event)
{
	return intel_perfmon_event_map[event];
}

static u64 intel_pmu_raw_event(u64 event)
{
#define CORE_EVNTSEL_EVENT_MASK		0x000000FFULL
#define CORE_EVNTSEL_UNIT_MASK		0x0000FF00ULL
#define CORE_EVNTSEL_COUNTER_MASK	0xFF000000ULL

#define CORE_EVNTSEL_MASK		\
	(CORE_EVNTSEL_EVENT_MASK |	\
	 CORE_EVNTSEL_UNIT_MASK  |	\
	 CORE_EVNTSEL_COUNTER_MASK)

	return event & CORE_EVNTSEL_MASK;
}
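
/*
 * A "raw" hw_event config uses the architectural EVNTSEL encoding: bits 7:0
 * select the event, bits 15:8 the unit mask and bits 31:24 the counter mask
 * (CMASK). The generic cache-miss entry above, 0x412e, is for instance
 * event 0x2E with unit mask 0x41 (LLC misses). intel_pmu_raw_event() keeps
 * only those fields; the privilege, INT and enable bits are filled in by
 * __hw_perf_counter_init() itself.
 */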

/*
 * AMD Performance Monitor K7 and later.
 */
static const u64 amd_perfmon_event_map[] =
{
	[PERF_COUNT_CPU_CYCLES]			= 0x0076,
	[PERF_COUNT_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_CACHE_REFERENCES]		= 0x0080,
	[PERF_COUNT_CACHE_MISSES]		= 0x0081,
	[PERF_COUNT_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_BRANCH_MISSES]		= 0x00c5,
};

static u64 amd_pmu_event_map(int event)
{
	return amd_perfmon_event_map[event];
}

static u64 amd_pmu_raw_event(u64 event)
{
#define K7_EVNTSEL_EVENT_MASK	0x7000000FFULL
#define K7_EVNTSEL_UNIT_MASK	0x00000FF00ULL
#define K7_EVNTSEL_COUNTER_MASK	0x0FF000000ULL

#define K7_EVNTSEL_MASK			\
	(K7_EVNTSEL_EVENT_MASK |	\
	 K7_EVNTSEL_UNIT_MASK  |	\
	 K7_EVNTSEL_COUNTER_MASK)

	return event & K7_EVNTSEL_MASK;
}

/*
 * Propagate counter elapsed time into the generic counter.
 * Can only be executed on the CPU where the counter is active.
 * Returns the delta events processed.
 */
static void
x86_perf_counter_update(struct perf_counter *counter,
			struct hw_perf_counter *hwc, int idx)
{
	u64 prev_raw_count, new_raw_count, delta;

	/*
	 * Careful: an NMI might modify the previous counter value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic counter atomically:
	 */
again:
	prev_raw_count = atomic64_read(&hwc->prev_count);
	rdmsrl(hwc->counter_base + idx, new_raw_count);

	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
					new_raw_count) != prev_raw_count)
		goto again;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (counter-)time and add that to the generic counter.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count, so we do that by clipping the delta to 32 bits:
	 */
	delta = (u64)(u32)((s32)new_raw_count - (s32)prev_raw_count);

	atomic64_add(delta, &counter->count);
	atomic64_sub(delta, &hwc->period_left);
}
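
/*
 * For instance, if prev_count was 0xffffff00 and the MSR now reads
 * 0x00000010 (the low 32 bits wrapped), the s32 subtraction yields
 * 0x110 = 272 events, and the (u32) cast keeps the delta positive even
 * though new_raw_count is numerically smaller than prev_raw_count. The
 * cmpxchg retry loop guarantees that an NMI hitting between the read and
 * the update cannot cause the same delta to be accounted twice.
 */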

static atomic_t num_counters;
static DEFINE_MUTEX(pmc_reserve_mutex);

static bool reserve_pmc_hardware(void)
{
	int i;

	if (nmi_watchdog == NMI_LOCAL_APIC)
		disable_lapic_nmi_watchdog();

	for (i = 0; i < nr_counters_generic; i++) {
		if (!reserve_perfctr_nmi(x86_pmu->perfctr + i))
			goto perfctr_fail;
	}

	for (i = 0; i < nr_counters_generic; i++) {
		if (!reserve_evntsel_nmi(x86_pmu->eventsel + i))
			goto eventsel_fail;
	}

	return true;

eventsel_fail:
	for (i--; i >= 0; i--)
		release_evntsel_nmi(x86_pmu->eventsel + i);

	i = nr_counters_generic;

perfctr_fail:
	for (i--; i >= 0; i--)
		release_perfctr_nmi(x86_pmu->perfctr + i);

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();

	return false;
}

static void release_pmc_hardware(void)
{
	int i;

	for (i = 0; i < nr_counters_generic; i++) {
		release_perfctr_nmi(x86_pmu->perfctr + i);
		release_evntsel_nmi(x86_pmu->eventsel + i);
	}

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();
}

static void hw_perf_counter_destroy(struct perf_counter *counter)
{
	if (atomic_dec_and_mutex_lock(&num_counters, &pmc_reserve_mutex)) {
		release_pmc_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

/*
 * Setup the hardware configuration for a given hw_event_type
 */
static int __hw_perf_counter_init(struct perf_counter *counter)
{
	struct perf_counter_hw_event *hw_event = &counter->hw_event;
	struct hw_perf_counter *hwc = &counter->hw;
	int err;

	if (unlikely(!perf_counters_initialized))
		return -EINVAL;

	err = 0;
	if (!atomic_inc_not_zero(&num_counters)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&num_counters) == 0 && !reserve_pmc_hardware())
			err = -EBUSY;
		else
			atomic_inc(&num_counters);
		mutex_unlock(&pmc_reserve_mutex);
	}
	if (err)
		return err;

	/*
	 * Generate PMC IRQs:
	 * (keep 'enabled' bit clear for now)
	 */
	hwc->config = ARCH_PERFMON_EVENTSEL_INT;

	/*
	 * Count user and OS events unless requested not to.
	 */
	if (!hw_event->exclude_user)
		hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
	if (!hw_event->exclude_kernel)
		hwc->config |= ARCH_PERFMON_EVENTSEL_OS;

	/*
	 * If privileged enough, allow NMI events:
	 */
	hwc->nmi = 0;
	if (capable(CAP_SYS_ADMIN) && hw_event->nmi)
		hwc->nmi = 1;

	hwc->irq_period = hw_event->irq_period;
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic counter period:
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		if ((s64)hwc->irq_period <= 0 || hwc->irq_period > 0x7FFFFFFF)
			hwc->irq_period = 0x7FFFFFFF;

	atomic64_set(&hwc->period_left, hwc->irq_period);

	/*
	 * Raw event type provide the config in the event structure
	 */
	if (perf_event_raw(hw_event)) {
		hwc->config |= x86_pmu->raw_event(perf_event_config(hw_event));
	} else {
		if (perf_event_id(hw_event) >= x86_pmu->max_events)
			return -EINVAL;
		/*
		 * The generic map:
		 */
		hwc->config |= x86_pmu->event_map(perf_event_id(hw_event));
	}

	counter->destroy = hw_perf_counter_destroy;

	return 0;
}
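
/*
 * Note on the num_counters refcounting above: the first counter takes the
 * slow path under pmc_reserve_mutex and claims the PMC MSRs from the NMI
 * watchdog via reserve_pmc_hardware(); later counters only take the
 * atomic_inc_not_zero() fast path. hw_perf_counter_destroy() is the mirror
 * image: only the caller that drops the count to zero (with the mutex held,
 * via atomic_dec_and_mutex_lock()) releases the hardware again.
 */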

static u64 intel_pmu_save_disable_all(void)
{
	u64 ctrl;

	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

	return ctrl;
}

static u64 amd_pmu_save_disable_all(void)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	int enabled, idx;

	enabled = cpuc->enabled;
	cpuc->enabled = 0;
	/*
	 * ensure we write the disable before we start disabling the
	 * counters proper, so that amd_pmu_enable_counter() does the
	 * right thing.
	 */
	barrier();

	for (idx = 0; idx < nr_counters_generic; idx++) {
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;
		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
		if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
			continue;
		val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
	}

	return enabled;
}

u64 hw_perf_save_disable(void)
{
	if (unlikely(!perf_counters_initialized))
		return 0;

	return x86_pmu->save_disable_all();
}
/*
 * Exported because of ACPI idle
 */
EXPORT_SYMBOL_GPL(hw_perf_save_disable);

static void intel_pmu_restore_all(u64 ctrl)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
}

static void amd_pmu_restore_all(u64 ctrl)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	int idx;

	cpuc->enabled = ctrl;
	barrier();
	if (!ctrl)
		return;

	for (idx = 0; idx < nr_counters_generic; idx++) {
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;
		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
		if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
			continue;
		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
	}
}

void hw_perf_restore(u64 ctrl)
{
	if (unlikely(!perf_counters_initialized))
		return;

	x86_pmu->restore_all(ctrl);
}
/*
 * Exported because of ACPI idle
 */
EXPORT_SYMBOL_GPL(hw_perf_restore);

static u64 intel_pmu_get_status(u64 mask)
{
	u64 status;

	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

	return status;
}

static u64 amd_pmu_get_status(u64 mask)
{
	u64 status = 0;
	int idx;

	for (idx = 0; idx < nr_counters_generic; idx++) {
		s64 val;

		if (!(mask & (1 << idx)))
			continue;

		rdmsrl(MSR_K7_PERFCTR0 + idx, val);
		val <<= (64 - counter_value_bits);
		if (val >= 0)
			status |= (1 << idx);
	}

	return status;
}
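
/*
 * AMD has no global overflow status register, so overflow is inferred from
 * the counter value itself: counters are programmed to (u64)-left (see
 * __hw_perf_counter_set_period()), which for any reasonable period leaves
 * bit 47 of the 48-bit counter set until the count passes zero. Shifting
 * the raw value left by (64 - counter_value_bits) moves that bit into the
 * sign bit, so "val >= 0" means the counter wrapped and needs servicing.
 */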

static u64 hw_perf_get_status(u64 mask)
{
	if (unlikely(!perf_counters_initialized))
		return 0;

	return x86_pmu->get_status(mask);
}

static void intel_pmu_ack_status(u64 ack)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}

static void amd_pmu_ack_status(u64 ack)
{
}

static void hw_perf_ack_status(u64 ack)
{
	if (unlikely(!perf_counters_initialized))
		return;

	x86_pmu->ack_status(ack);
}

static void intel_pmu_enable_counter(int idx, u64 config)
{
	wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx,
			config | ARCH_PERFMON_EVENTSEL0_ENABLE);
}

static void amd_pmu_enable_counter(int idx, u64 config)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);

	set_bit(idx, cpuc->active_mask);
	if (cpuc->enabled)
		config |= ARCH_PERFMON_EVENTSEL0_ENABLE;

	wrmsrl(MSR_K7_EVNTSEL0 + idx, config);
}

static void hw_perf_enable(int idx, u64 config)
{
	if (unlikely(!perf_counters_initialized))
		return;

	x86_pmu->enable(idx, config);
}

static void intel_pmu_disable_counter(int idx, u64 config)
{
	wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, config);
}

static void amd_pmu_disable_counter(int idx, u64 config)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);

	clear_bit(idx, cpuc->active_mask);
	wrmsrl(MSR_K7_EVNTSEL0 + idx, config);
}

static void hw_perf_disable(int idx, u64 config)
{
	if (unlikely(!perf_counters_initialized))
		return;

	x86_pmu->disable(idx, config);
}

static inline void
__pmc_fixed_disable(struct perf_counter *counter,
		    struct hw_perf_counter *hwc, unsigned int __idx)
{
	int idx = __idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, mask;
	int err;

	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	err = checking_wrmsrl(hwc->config_base, ctrl_val);
}

static inline void
__x86_pmu_disable(struct perf_counter *counter,
		  struct hw_perf_counter *hwc, unsigned int idx)
{
	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
		__pmc_fixed_disable(counter, hwc, idx);
	else
		hw_perf_disable(idx, hwc->config);
}

static DEFINE_PER_CPU(u64, prev_left[X86_PMC_IDX_MAX]);

/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the counter disabled in hw:
 */
static void
__hw_perf_counter_set_period(struct perf_counter *counter,
			     struct hw_perf_counter *hwc, int idx)
{
	s64 left = atomic64_read(&hwc->period_left);
	s64 period = hwc->irq_period;
	int err;

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_set(&hwc->period_left, left);
	}

	per_cpu(prev_left[idx], smp_processor_id()) = left;

	/*
	 * The hw counter starts counting from this counter offset,
	 * mark it to be able to extract future deltas:
	 */
	atomic64_set(&hwc->prev_count, (u64)-left);

	err = checking_wrmsrl(hwc->counter_base + idx,
			      (u64)(-left) & counter_value_mask);
}
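
/*
 * Writing (u64)-left (truncated to the counter width) makes the hardware
 * overflow, and thus raise its PMI, after exactly 'left' further events.
 * With the artificial Intel period of 0x7FFFFFFF and e.g. a 40-bit wide
 * counter, the value written is 0xFF80000001, so the counter wraps after
 * 2^31 - 1 increments; prev_count keeps the full 64-bit -left so that
 * x86_perf_counter_update() can compute correct deltas afterwards.
 */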

static inline void
__pmc_fixed_enable(struct perf_counter *counter,
		   struct hw_perf_counter *hwc, unsigned int __idx)
{
	int idx = __idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, bits, mask;
	int err;

	/*
	 * Enable IRQ generation (0x8),
	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
	 * if requested:
	 */
	bits = 0x8ULL;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
		bits |= 0x2;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
		bits |= 0x1;
	bits <<= (idx * 4);
	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	ctrl_val |= bits;
	err = checking_wrmsrl(hwc->config_base, ctrl_val);
}
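
/*
 * MSR_ARCH_PERFMON_FIXED_CTR_CTRL packs one 4-bit control field per fixed
 * counter: bit 0 enables ring-0 counting, bit 1 ring-3 counting and bit 3
 * PMI on overflow. Enabling fixed counter 1 (CPU cycles) for both rings
 * with interrupts therefore sets bits 7:4 of ctrl_val to 0xb, while the
 * mask above keeps the neighbouring counters' fields untouched.
 */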

static void
__x86_pmu_enable(struct perf_counter *counter,
		 struct hw_perf_counter *hwc, int idx)
{
	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
		__pmc_fixed_enable(counter, hwc, idx);
	else
		hw_perf_enable(idx, hwc->config);
}

static int
fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
{
	unsigned int event;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		return -1;

	if (unlikely(hwc->nmi))
		return -1;

	event = hwc->config & ARCH_PERFMON_EVENT_MASK;

	if (unlikely(event == x86_pmu->event_map(PERF_COUNT_INSTRUCTIONS)))
		return X86_PMC_IDX_FIXED_INSTRUCTIONS;
	if (unlikely(event == x86_pmu->event_map(PERF_COUNT_CPU_CYCLES)))
		return X86_PMC_IDX_FIXED_CPU_CYCLES;
	if (unlikely(event == x86_pmu->event_map(PERF_COUNT_BUS_CYCLES)))
		return X86_PMC_IDX_FIXED_BUS_CYCLES;

	return -1;
}

/*
 * Find a PMC slot for the freshly enabled / scheduled in counter:
 */
static int x86_pmu_enable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;
	int idx;

	idx = fixed_mode_idx(counter, hwc);
	if (idx >= 0) {
		/*
		 * Try to get the fixed counter, if that is already taken
		 * then try to get a generic counter:
		 */
		if (test_and_set_bit(idx, cpuc->used))
			goto try_generic;

		hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
		/*
		 * We set it so that counter_base + idx in wrmsr/rdmsr maps to
		 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
		 */
		hwc->counter_base =
			MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
		hwc->idx = idx;
	} else {
		idx = hwc->idx;
		/* Try to get the previous generic counter again */
		if (test_and_set_bit(idx, cpuc->used)) {
try_generic:
			idx = find_first_zero_bit(cpuc->used, nr_counters_generic);
			if (idx == nr_counters_generic)
				return -EAGAIN;

			set_bit(idx, cpuc->used);
			hwc->idx = idx;
		}
		hwc->config_base  = x86_pmu->eventsel;
		hwc->counter_base = x86_pmu->perfctr;
	}

	perf_counters_lapic_init(hwc->nmi);

	__x86_pmu_disable(counter, hwc, idx);

	cpuc->counters[idx] = counter;
	/*
	 * Make it visible before enabling the hw:
	 */
	barrier();

	__hw_perf_counter_set_period(counter, hwc, idx);
	__x86_pmu_enable(counter, hwc, idx);

	return 0;
}

void perf_counter_print_debug(void)
{
	u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
	struct cpu_hw_counters *cpuc;
	int cpu, idx;

	if (!nr_counters_generic)
		return;

	local_irq_disable();

	cpu = smp_processor_id();
	cpuc = &per_cpu(cpu_hw_counters, cpu);

	if (intel_perfmon_version >= 2) {
		rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
		rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
		rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);

		pr_info("\n");
		pr_info("CPU#%d: ctrl: %016llx\n", cpu, ctrl);
		pr_info("CPU#%d: status: %016llx\n", cpu, status);
		pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow);
		pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed);
	}
	pr_info("CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used);

	for (idx = 0; idx < nr_counters_generic; idx++) {
		rdmsrl(x86_pmu->eventsel + idx, pmc_ctrl);
		rdmsrl(x86_pmu->perfctr + idx, pmc_count);

		prev_left = per_cpu(prev_left[idx], cpu);

		pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n",
			cpu, idx, pmc_ctrl);
		pr_info("CPU#%d: gen-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
		pr_info("CPU#%d: gen-PMC%d left: %016llx\n",
			cpu, idx, prev_left);
	}
	for (idx = 0; idx < nr_counters_fixed; idx++) {
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);

		pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
	}
	local_irq_enable();
}

static void x86_pmu_disable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;
	unsigned int idx = hwc->idx;

	__x86_pmu_disable(counter, hwc, idx);

	clear_bit(idx, cpuc->used);
	cpuc->counters[idx] = NULL;
	/*
	 * Make sure the cleared pointer becomes visible before we
	 * (potentially) free the counter:
	 */
	barrier();

	/*
	 * Drain the remaining delta count out of a counter
	 * that we are disabling:
	 */
	x86_perf_counter_update(counter, hwc, idx);
}

/*
 * Save and restart an expired counter. Called by NMI contexts,
 * so it has to be careful about preempting normal counter ops:
 */
static void perf_save_and_restart(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	int idx = hwc->idx;

	x86_perf_counter_update(counter, hwc, idx);
	__hw_perf_counter_set_period(counter, hwc, idx);

	if (counter->state == PERF_COUNTER_STATE_ACTIVE)
		__x86_pmu_enable(counter, hwc, idx);
}

/*
 * Maximum interrupt frequency of 100KHz per CPU
 */
#define PERFMON_MAX_INTERRUPTS (100000/HZ)
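
/*
 * With HZ=1000 this allows at most 100 PMC interrupts per tick, i.e. about
 * 100,000 per second and per CPU. Once __smp_perf_counter_interrupt() has
 * incremented cpuc->interrupts past this budget it stops re-enabling the
 * PMU, and perf_counter_unthrottle() (expected to run from the regular
 * timer tick) re-enables it and resets the budget.
 */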

/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static int __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi)
{
	int bit, cpu = smp_processor_id();
	u64 ack, status;
	struct cpu_hw_counters *cpuc = &per_cpu(cpu_hw_counters, cpu);
	int ret = 0;

	cpuc->throttle_ctrl = hw_perf_save_disable();

	status = hw_perf_get_status(cpuc->throttle_ctrl);
	if (!status)
		goto out;

	ret = 1;
again:
	inc_irq_stat(apic_perf_irqs);
	ack = status;
	for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
		struct perf_counter *counter = cpuc->counters[bit];

		clear_bit(bit, (unsigned long *)&status);
		if (!counter)
			continue;

		perf_save_and_restart(counter);
		if (perf_counter_overflow(counter, nmi, regs, 0))
			__x86_pmu_disable(counter, &counter->hw, bit);
	}

	hw_perf_ack_status(ack);

	/*
	 * Repeat if there is more work to be done:
	 */
	status = hw_perf_get_status(cpuc->throttle_ctrl);
	if (status)
		goto again;
out:
	/*
	 * Restore - do not reenable when global enable is off or throttled:
	 */
	if (++cpuc->interrupts < PERFMON_MAX_INTERRUPTS)
		hw_perf_restore(cpuc->throttle_ctrl);

	return ret;
}

void perf_counter_unthrottle(void)
{
	struct cpu_hw_counters *cpuc;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
		return;

	if (unlikely(!perf_counters_initialized))
		return;

	cpuc = &__get_cpu_var(cpu_hw_counters);
	if (cpuc->interrupts >= PERFMON_MAX_INTERRUPTS) {
		if (printk_ratelimit())
			printk(KERN_WARNING "PERFMON: max interrupts exceeded!\n");
		hw_perf_restore(cpuc->throttle_ctrl);
	}
	cpuc->interrupts = 0;
}

void smp_perf_counter_interrupt(struct pt_regs *regs)
{
	irq_enter();
	apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
	ack_APIC_irq();
	__smp_perf_counter_interrupt(regs, 0);
	irq_exit();
}

void smp_perf_pending_interrupt(struct pt_regs *regs)
{
	irq_enter();
	ack_APIC_irq();
	inc_irq_stat(apic_pending_irqs);
	perf_counter_do_pending();
	irq_exit();
}

void set_perf_counter_pending(void)
{
	apic->send_IPI_self(LOCAL_PENDING_VECTOR);
}

void perf_counters_lapic_init(int nmi)
{
	u32 apic_val;

	if (!perf_counters_initialized)
		return;
	/*
	 * Enable the performance counter vector in the APIC LVT:
	 */
	apic_val = apic_read(APIC_LVTERR);

	apic_write(APIC_LVTERR, apic_val | APIC_LVT_MASKED);
	if (nmi)
		apic_write(APIC_LVTPC, APIC_DM_NMI);
	else
		apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
	apic_write(APIC_LVTERR, apic_val);
}

static int __kprobes
perf_counter_nmi_handler(struct notifier_block *self,
			 unsigned long cmd, void *__args)
{
	struct die_args *args = __args;
	struct pt_regs *regs;
	int ret;

	switch (cmd) {
	case DIE_NMI:
	case DIE_NMI_IPI:
		break;

	default:
		return NOTIFY_DONE;
	}

	regs = args->regs;

	apic_write(APIC_LVTPC, APIC_DM_NMI);
	ret = __smp_perf_counter_interrupt(regs, 1);

	return ret ? NOTIFY_STOP : NOTIFY_OK;
}

static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
	.notifier_call		= perf_counter_nmi_handler,
	.next			= NULL,
	.priority		= 1
};

static struct x86_pmu intel_pmu = {
	.save_disable_all	= intel_pmu_save_disable_all,
	.restore_all		= intel_pmu_restore_all,
	.get_status		= intel_pmu_get_status,
	.ack_status		= intel_pmu_ack_status,
	.enable			= intel_pmu_enable_counter,
	.disable		= intel_pmu_disable_counter,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.raw_event		= intel_pmu_raw_event,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
};

static struct x86_pmu amd_pmu = {
	.save_disable_all	= amd_pmu_save_disable_all,
	.restore_all		= amd_pmu_restore_all,
	.get_status		= amd_pmu_get_status,
	.ack_status		= amd_pmu_ack_status,
	.enable			= amd_pmu_enable_counter,
	.disable		= amd_pmu_disable_counter,
	.eventsel		= MSR_K7_EVNTSEL0,
	.perfctr		= MSR_K7_PERFCTR0,
	.event_map		= amd_pmu_event_map,
	.raw_event		= amd_pmu_raw_event,
	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
};

static struct x86_pmu *intel_pmu_init(void)
{
	union cpuid10_edx edx;
	union cpuid10_eax eax;
	unsigned int unused;
	unsigned int ebx;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
		return NULL;

	/*
	 * Check whether the Architectural PerfMon supports
	 * Branch Misses Retired Event or not.
	 */
	cpuid(10, &eax.full, &ebx, &unused, &edx.full);
	if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
		return NULL;

	intel_perfmon_version = eax.split.version_id;
	if (intel_perfmon_version < 2)
		return NULL;

	pr_info("Intel Performance Monitoring support detected.\n");
	pr_info("... version: %d\n", intel_perfmon_version);
	pr_info("... bit width: %d\n", eax.split.bit_width);
	pr_info("... mask length: %d\n", eax.split.mask_length);

	nr_counters_generic = eax.split.num_counters;
	nr_counters_fixed = edx.split.num_counters_fixed;
	counter_value_mask = (1ULL << eax.split.bit_width) - 1;

	return &intel_pmu;
}

static struct x86_pmu *amd_pmu_init(void)
{
	nr_counters_generic = 4;
	nr_counters_fixed = 0;
	counter_value_mask = 0x0000FFFFFFFFFFFFULL;
	counter_value_bits = 48;

	pr_info("AMD Performance Monitoring support detected.\n");

	return &amd_pmu;
}

void __init init_hw_perf_counters(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		x86_pmu = intel_pmu_init();
		break;
	case X86_VENDOR_AMD:
		x86_pmu = amd_pmu_init();
		break;
	default:
		return;
	}
	if (!x86_pmu)
		return;

	pr_info("... num counters: %d\n", nr_counters_generic);
	if (nr_counters_generic > X86_PMC_MAX_GENERIC) {
		nr_counters_generic = X86_PMC_MAX_GENERIC;
		WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!",
			nr_counters_generic, X86_PMC_MAX_GENERIC);
	}
	perf_counter_mask = (1 << nr_counters_generic) - 1;
	perf_max_counters = nr_counters_generic;

	pr_info("... value mask: %016Lx\n", counter_value_mask);

	if (nr_counters_fixed > X86_PMC_MAX_FIXED) {
		nr_counters_fixed = X86_PMC_MAX_FIXED;
		WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!",
			nr_counters_fixed, X86_PMC_MAX_FIXED);
	}
	pr_info("... fixed counters: %d\n", nr_counters_fixed);

	perf_counter_mask |= ((1LL << nr_counters_fixed)-1) << X86_PMC_IDX_FIXED;

	pr_info("... counter mask: %016Lx\n", perf_counter_mask);
	perf_counters_initialized = true;

	perf_counters_lapic_init(0);
	register_die_notifier(&perf_counter_nmi_notifier);
}
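
/*
 * Example of the resulting perf_counter_mask on a Core 2 class CPU with
 * 2 generic and 3 fixed counters: the low bits 0x3 cover the generic PMCs
 * and bits 32-34 the fixed ones (X86_PMC_IDX_FIXED is 32), i.e.
 * perf_counter_mask == 0x0000000700000003.
 */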

static void x86_pmu_read(struct perf_counter *counter)
{
	x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
}

static const struct pmu pmu = {
	.enable		= x86_pmu_enable,
	.disable	= x86_pmu_disable,
	.read		= x86_pmu_read,
};

const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
{
	int err;

	err = __hw_perf_counter_init(counter);
	if (err)
		return ERR_PTR(err);

	return &pmu;
}

/*
 * callchain support
 */

static inline
void callchain_store(struct perf_callchain_entry *entry, unsigned long ip)
{
	if (entry->nr < MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;
}

static DEFINE_PER_CPU(struct perf_callchain_entry, irq_entry);
static DEFINE_PER_CPU(struct perf_callchain_entry, nmi_entry);

static void
backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
	/* Ignore warnings */
}

static void backtrace_warning(void *data, char *msg)
{
	/* Ignore warnings */
}

static int backtrace_stack(void *data, char *name)
{
	/* Don't bother with IRQ stacks for now */
	return -1;
}

static void backtrace_address(void *data, unsigned long addr, int reliable)
{
	struct perf_callchain_entry *entry = data;

	if (reliable)
		callchain_store(entry, addr);
}

static const struct stacktrace_ops backtrace_ops = {
	.warning		= backtrace_warning,
	.warning_symbol		= backtrace_warning_symbol,
	.stack			= backtrace_stack,
	.address		= backtrace_address,
};

static void
perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	unsigned long bp;
	char *stack;
	int nr = entry->nr;

	callchain_store(entry, instruction_pointer(regs));

	stack = ((char *)regs + sizeof(struct pt_regs));
#ifdef CONFIG_FRAME_POINTER
	bp = frame_pointer(regs);
#else
	bp = 0;
#endif

	dump_trace(NULL, regs, (void *)stack, bp, &backtrace_ops, entry);

	entry->kernel = entry->nr - nr;
}

struct stack_frame {
	const void __user	*next_fp;
	unsigned long		return_address;
};

static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
{
	int ret;

	if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
		return 0;

	ret = 1;
	pagefault_disable();
	if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
		ret = 0;
	pagefault_enable();

	return ret;
}

static void
perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	struct stack_frame frame;
	const void __user *fp;
	int nr = entry->nr;

	regs = (struct pt_regs *)current->thread.sp0 - 1;
	fp = (void __user *)regs->bp;

	callchain_store(entry, regs->ip);

	while (entry->nr < MAX_STACK_DEPTH) {
		frame.next_fp = NULL;
		frame.return_address = 0;

		if (!copy_stack_frame(fp, &frame))
			break;

		if ((unsigned long)fp < user_stack_pointer(regs))
			break;

		callchain_store(entry, frame.return_address);
		fp = frame.next_fp;
	}

	entry->user = entry->nr - nr;
}
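
/*
 * The user-space walk above relies on the classic frame-pointer layout:
 * each frame starts with the saved previous frame pointer followed by the
 * return address (struct stack_frame), so code built with
 * -fomit-frame-pointer will typically yield only the interrupted ip.
 * copy_stack_frame() reads the frame with pagefaults disabled, so a frame
 * that happens to be paged out simply terminates the walk.
 */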

static void
perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	int is_user;

	if (!regs)
		return;

	is_user = user_mode(regs);

	if (!current || current->pid == 0)
		return;

	if (is_user && current->state != TASK_RUNNING)
		return;

	if (!is_user)
		perf_callchain_kernel(regs, entry);

	if (current->mm)
		perf_callchain_user(regs, entry);
}

struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
	struct perf_callchain_entry *entry;

	if (in_nmi())
		entry = &__get_cpu_var(nmi_entry);
	else
		entry = &__get_cpu_var(irq_entry);

	entry->nr = 0;
	entry->hv = 0;
	entry->kernel = 0;
	entry->user = 0;

	perf_do_callchain(regs, entry);

	return entry;
}