- /*
- * Performance counter x86 architecture code
- *
- * Copyright(C) 2008 Thomas Gleixner <tglx@linutronix.de>
- * Copyright(C) 2008 Red Hat, Inc., Ingo Molnar
- * Copyright(C) 2009 Jaswinder Singh Rajput
- *
- * For licencing details see kernel-base/COPYING
- */
- #include <linux/perf_counter.h>
- #include <linux/capability.h>
- #include <linux/notifier.h>
- #include <linux/hardirq.h>
- #include <linux/kprobes.h>
- #include <linux/module.h>
- #include <linux/kdebug.h>
- #include <linux/sched.h>
- #include <asm/perf_counter.h>
- #include <asm/apic.h>
- static bool perf_counters_initialized __read_mostly;
- /*
- * Number of (generic) HW counters:
- */
- static int nr_counters_generic __read_mostly;
- static u64 perf_counter_mask __read_mostly;
- static u64 counter_value_mask __read_mostly;
- static int counter_value_bits __read_mostly;
- static int nr_counters_fixed __read_mostly;
- struct cpu_hw_counters {
- struct perf_counter *counters[X86_PMC_IDX_MAX];
- unsigned long used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
- unsigned long interrupts;
- u64 throttle_ctrl;
- unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
- int enabled;
- };
- /*
- * struct pmc_x86_ops - performance counter x86 ops
- */
- struct pmc_x86_ops {
- u64 (*save_disable_all)(void);
- void (*restore_all)(u64);
- u64 (*get_status)(u64);
- void (*ack_status)(u64);
- void (*enable)(int, u64);
- void (*disable)(int, u64);
- unsigned eventsel;
- unsigned perfctr;
- u64 (*event_map)(int);
- u64 (*raw_event)(u64);
- int max_events;
- };
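- /*
- * One instance of these ops exists per supported vendor (pmc_intel_ops
- * and pmc_amd_ops below); pmc_ops is pointed at the matching one by
- * init_hw_perf_counters() at boot.
- */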
- static struct pmc_x86_ops *pmc_ops;
- static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = {
- .enabled = 1,
- };
- /*
- * Intel PerfMon v3. Used on Core2 and later.
- */
- static const u64 intel_perfmon_event_map[] =
- {
- [PERF_COUNT_CPU_CYCLES] = 0x003c,
- [PERF_COUNT_INSTRUCTIONS] = 0x00c0,
- [PERF_COUNT_CACHE_REFERENCES] = 0x4f2e,
- [PERF_COUNT_CACHE_MISSES] = 0x412e,
- [PERF_COUNT_BRANCH_INSTRUCTIONS] = 0x00c4,
- [PERF_COUNT_BRANCH_MISSES] = 0x00c5,
- [PERF_COUNT_BUS_CYCLES] = 0x013c,
- };
- static u64 pmc_intel_event_map(int event)
- {
- return intel_perfmon_event_map[event];
- }
- static u64 pmc_intel_raw_event(u64 event)
- {
- #define CORE_EVNTSEL_EVENT_MASK 0x000000FF
- #define CORE_EVNTSEL_UNIT_MASK 0x0000FF00
- #define CORE_EVNTSEL_COUNTER_MASK 0xFF000000
- #define CORE_EVNTSEL_MASK \
- (CORE_EVNTSEL_EVENT_MASK | \
- CORE_EVNTSEL_UNIT_MASK | \
- CORE_EVNTSEL_COUNTER_MASK)
- return event & CORE_EVNTSEL_MASK;
- }
- /*
- * AMD Performance Monitor K7 and later.
- */
- static const u64 amd_perfmon_event_map[] =
- {
- [PERF_COUNT_CPU_CYCLES] = 0x0076,
- [PERF_COUNT_INSTRUCTIONS] = 0x00c0,
- [PERF_COUNT_CACHE_REFERENCES] = 0x0080,
- [PERF_COUNT_CACHE_MISSES] = 0x0081,
- [PERF_COUNT_BRANCH_INSTRUCTIONS] = 0x00c4,
- [PERF_COUNT_BRANCH_MISSES] = 0x00c5,
- };
- static u64 pmc_amd_event_map(int event)
- {
- return amd_perfmon_event_map[event];
- }
- static u64 pmc_amd_raw_event(u64 event)
- {
- #define K7_EVNTSEL_EVENT_MASK 0x7000000FF
- #define K7_EVNTSEL_UNIT_MASK 0x00000FF00
- #define K7_EVNTSEL_COUNTER_MASK 0x0FF000000
- #define K7_EVNTSEL_MASK \
- (K7_EVNTSEL_EVENT_MASK | \
- K7_EVNTSEL_UNIT_MASK | \
- K7_EVNTSEL_COUNTER_MASK)
- return event & K7_EVNTSEL_MASK;
- }
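- /*
- * Note on the raw_event helpers above: they mask a user-supplied raw
- * config down to the event-select, unit-mask and counter-mask fields,
- * so the interrupt/enable/privilege bits stay under kernel control in
- * hwc->config.
- */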
- /*
- * Propagate the hardware counter's elapsed count into the generic counter.
- * Can only be executed on the CPU where the counter is active.
- */
- static void
- x86_perf_counter_update(struct perf_counter *counter,
- struct hw_perf_counter *hwc, int idx)
- {
- u64 prev_raw_count, new_raw_count, delta;
- /*
- * Careful: an NMI might modify the previous counter value.
- *
- * Our tactic to handle this is to first atomically read and
- * exchange a new raw count - then add that new-prev delta
- * count to the generic counter atomically:
- */
- again:
- prev_raw_count = atomic64_read(&hwc->prev_count);
- rdmsrl(hwc->counter_base + idx, new_raw_count);
- if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
- new_raw_count) != prev_raw_count)
- goto again;
- /*
- * Now we have the new raw value and have updated the prev
- * timestamp already. We can now calculate the elapsed delta
- * (counter-)time and add that to the generic counter.
- *
- * Careful: not all hardware sign-extends above the physical width
- * of the count, so work around that by clipping the delta to 32 bits:
- */
- delta = (u64)(u32)((s32)new_raw_count - (s32)prev_raw_count);
- atomic64_add(delta, &counter->count);
- atomic64_sub(delta, &hwc->period_left);
- }
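- /*
- * Example of the 32-bit clipped delta above: if prev_raw_count reads
- * back as 0xFFFFFF00 and new_raw_count as 0x00000010 after a wrap,
- * then (s32)0x10 - (s32)0xFFFFFF00 = 16 - (-256) = 272, so the delta
- * correctly accounts for the 272 events that elapsed across the wrap.
- */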
- /*
- * Setup the hardware configuration for a given hw_event_type
- */
- static int __hw_perf_counter_init(struct perf_counter *counter)
- {
- struct perf_counter_hw_event *hw_event = &counter->hw_event;
- struct hw_perf_counter *hwc = &counter->hw;
- if (unlikely(!perf_counters_initialized))
- return -EINVAL;
- /*
- * Generate PMC IRQs:
- * (keep 'enabled' bit clear for now)
- */
- hwc->config = ARCH_PERFMON_EVENTSEL_INT;
- /*
- * Count user and OS events unless requested not to.
- */
- if (!hw_event->exclude_user)
- hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
- if (!hw_event->exclude_kernel)
- hwc->config |= ARCH_PERFMON_EVENTSEL_OS;
- /*
- * If privileged enough, allow NMI events:
- */
- hwc->nmi = 0;
- if (capable(CAP_SYS_ADMIN) && hw_event->nmi)
- hwc->nmi = 1;
- hwc->irq_period = hw_event->irq_period;
- /*
- * Intel PMCs cannot be accessed sanely above 32 bit width,
- * so we install an artificial 1<<31 period regardless of
- * the generic counter period:
- */
- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
- if ((s64)hwc->irq_period <= 0 || hwc->irq_period > 0x7FFFFFFF)
- hwc->irq_period = 0x7FFFFFFF;
- atomic64_set(&hwc->period_left, hwc->irq_period);
- /*
- * Raw event types provide the config directly in the event structure.
- */
- if (hw_event->raw) {
- hwc->config |= pmc_ops->raw_event(hw_event->type);
- } else {
- if (hw_event->type >= pmc_ops->max_events)
- return -EINVAL;
- /*
- * The generic map:
- */
- hwc->config |= pmc_ops->event_map(hw_event->type);
- }
- counter->wakeup_pending = 0;
- return 0;
- }
- static u64 pmc_intel_save_disable_all(void)
- {
- u64 ctrl;
- rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
- wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
- return ctrl;
- }
- static u64 pmc_amd_save_disable_all(void)
- {
- struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
- int enabled, idx;
- enabled = cpuc->enabled;
- cpuc->enabled = 0;
- barrier();
- for (idx = 0; idx < nr_counters_generic; idx++) {
- u64 val;
- rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
- if (val & ARCH_PERFMON_EVENTSEL0_ENABLE) {
- val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
- wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
- }
- }
- return enabled;
- }
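- /*
- * The K7-style AMD PMU has no global-control MSR, so "disable all"
- * clears the enable bit of each EVNTSEL individually and returns the
- * previous per-CPU 'enabled' flag; hw_perf_restore() later hands that
- * value back as 'ctrl'.
- */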
- u64 hw_perf_save_disable(void)
- {
- if (unlikely(!perf_counters_initialized))
- return 0;
- return pmc_ops->save_disable_all();
- }
- /*
- * Exported because of ACPI idle
- */
- EXPORT_SYMBOL_GPL(hw_perf_save_disable);
- static void pmc_intel_restore_all(u64 ctrl)
- {
- wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
- }
- static void pmc_amd_restore_all(u64 ctrl)
- {
- struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
- int idx;
- cpuc->enabled = ctrl;
- barrier();
- if (!ctrl)
- return;
- for (idx = 0; idx < nr_counters_generic; idx++) {
- if (test_bit(idx, cpuc->active_mask)) {
- u64 val;
- rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
- val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
- wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
- }
- }
- }
- void hw_perf_restore(u64 ctrl)
- {
- if (unlikely(!perf_counters_initialized))
- return;
- pmc_ops->restore_all(ctrl);
- }
- /*
- * Exported because of ACPI idle
- */
- EXPORT_SYMBOL_GPL(hw_perf_restore);
- static u64 pmc_intel_get_status(u64 mask)
- {
- u64 status;
- rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
- return status;
- }
- static u64 pmc_amd_get_status(u64 mask)
- {
- u64 status = 0;
- int idx;
- for (idx = 0; idx < nr_counters_generic; idx++) {
- s64 val;
- if (!(mask & (1 << idx)))
- continue;
- rdmsrl(MSR_K7_PERFCTR0 + idx, val);
- val <<= (64 - counter_value_bits);
- if (val >= 0)
- status |= (1 << idx);
- }
- return status;
- }
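- /*
- * The overflow test above relies on the counter being programmed to
- * (u64)-left: while it is still counting up towards the overflow, the
- * top bit of the 48-bit value is set, so after shifting it into the
- * sign bit a negative result means "not yet overflowed" and a
- * non-negative one means the counter has wrapped.
- */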
- static u64 hw_perf_get_status(u64 mask)
- {
- if (unlikely(!perf_counters_initialized))
- return 0;
- return pmc_ops->get_status(mask);
- }
- static void pmc_intel_ack_status(u64 ack)
- {
- wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
- }
- static void pmc_amd_ack_status(u64 ack)
- {
- }
- static void hw_perf_ack_status(u64 ack)
- {
- if (unlikely(!perf_counters_initialized))
- return;
- pmc_ops->ack_status(ack);
- }
- static void pmc_intel_enable(int idx, u64 config)
- {
- wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx,
- config | ARCH_PERFMON_EVENTSEL0_ENABLE);
- }
- static void pmc_amd_enable(int idx, u64 config)
- {
- struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
- set_bit(idx, cpuc->active_mask);
- if (cpuc->enabled)
- config |= ARCH_PERFMON_EVENTSEL0_ENABLE;
- wrmsrl(MSR_K7_EVNTSEL0 + idx, config);
- }
- static void hw_perf_enable(int idx, u64 config)
- {
- if (unlikely(!perf_counters_initialized))
- return;
- pmc_ops->enable(idx, config);
- }
- static void pmc_intel_disable(int idx, u64 config)
- {
- wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, config);
- }
- static void pmc_amd_disable(int idx, u64 config)
- {
- struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
- clear_bit(idx, cpuc->active_mask);
- wrmsrl(MSR_K7_EVNTSEL0 + idx, config);
- }
- static void hw_perf_disable(int idx, u64 config)
- {
- if (unlikely(!perf_counters_initialized))
- return;
- pmc_ops->disable(idx, config);
- }
- static inline void
- __pmc_fixed_disable(struct perf_counter *counter,
- struct hw_perf_counter *hwc, unsigned int __idx)
- {
- int idx = __idx - X86_PMC_IDX_FIXED;
- u64 ctrl_val, mask;
- int err;
- mask = 0xfULL << (idx * 4);
- rdmsrl(hwc->config_base, ctrl_val);
- ctrl_val &= ~mask;
- err = checking_wrmsrl(hwc->config_base, ctrl_val);
- }
- static inline void
- __pmc_generic_disable(struct perf_counter *counter,
- struct hw_perf_counter *hwc, unsigned int idx)
- {
- if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
- __pmc_fixed_disable(counter, hwc, idx);
- else
- hw_perf_disable(idx, hwc->config);
- }
- static DEFINE_PER_CPU(u64, prev_left[X86_PMC_IDX_MAX]);
- /*
- * Set the next IRQ period, based on the hwc->period_left value.
- * To be called with the counter disabled in hw:
- */
- static void
- __hw_perf_counter_set_period(struct perf_counter *counter,
- struct hw_perf_counter *hwc, int idx)
- {
- s64 left = atomic64_read(&hwc->period_left);
- s32 period = hwc->irq_period;
- int err;
- /*
- * If we are way outside a reasonable range then just skip forward:
- */
- if (unlikely(left <= -period)) {
- left = period;
- atomic64_set(&hwc->period_left, left);
- }
- if (unlikely(left <= 0)) {
- left += period;
- atomic64_set(&hwc->period_left, left);
- }
- per_cpu(prev_left[idx], smp_processor_id()) = left;
- /*
- * The hw counter starts counting from this (negative) offset;
- * record it so that we can compute future deltas:
- */
- atomic64_set(&hwc->prev_count, (u64)-left);
- err = checking_wrmsrl(hwc->counter_base + idx,
- (u64)(-left) & counter_value_mask);
- }
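- /*
- * Writing (u64)-left (masked to the counter width) means the hardware
- * counter overflows - and raises a PMI - after another 'left' events,
- * which is exactly the remaining IRQ period.
- */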
- static inline void
- __pmc_fixed_enable(struct perf_counter *counter,
- struct hw_perf_counter *hwc, unsigned int __idx)
- {
- int idx = __idx - X86_PMC_IDX_FIXED;
- u64 ctrl_val, bits, mask;
- int err;
- /*
- * Enable IRQ generation (0x8),
- * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
- * if requested:
- */
- bits = 0x8ULL;
- if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
- bits |= 0x2;
- if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
- bits |= 0x1;
- bits <<= (idx * 4);
- mask = 0xfULL << (idx * 4);
- rdmsrl(hwc->config_base, ctrl_val);
- ctrl_val &= ~mask;
- ctrl_val |= bits;
- err = checking_wrmsrl(hwc->config_base, ctrl_val);
- }
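- /*
- * Each fixed-purpose counter owns a 4-bit field in
- * MSR_ARCH_PERFMON_FIXED_CTR_CTRL (hwc->config_base here), hence the
- * 'idx * 4' shifts: bit 0 enables ring-0 counting, bit 1 ring-3
- * counting and bit 3 PMI generation for that counter.
- */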
- static void
- __pmc_generic_enable(struct perf_counter *counter,
- struct hw_perf_counter *hwc, int idx)
- {
- if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
- __pmc_fixed_enable(counter, hwc, idx);
- else
- hw_perf_enable(idx, hwc->config);
- }
- static int
- fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
- {
- unsigned int event;
- if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
- return -1;
- if (unlikely(hwc->nmi))
- return -1;
- event = hwc->config & ARCH_PERFMON_EVENT_MASK;
- if (unlikely(event == pmc_ops->event_map(PERF_COUNT_INSTRUCTIONS)))
- return X86_PMC_IDX_FIXED_INSTRUCTIONS;
- if (unlikely(event == pmc_ops->event_map(PERF_COUNT_CPU_CYCLES)))
- return X86_PMC_IDX_FIXED_CPU_CYCLES;
- if (unlikely(event == pmc_ops->event_map(PERF_COUNT_BUS_CYCLES)))
- return X86_PMC_IDX_FIXED_BUS_CYCLES;
- return -1;
- }
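- /*
- * Only the instructions, cpu-cycles and bus-cycles events can be mapped
- * onto Intel fixed-purpose counters, and only when the counter is not
- * using NMIs; on AMD, and in every other case, -1 is returned and the
- * counter falls back to a generic slot in pmc_generic_enable().
- */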
- /*
- * Find a PMC slot for the freshly enabled / scheduled in counter:
- */
- static int pmc_generic_enable(struct perf_counter *counter)
- {
- struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
- struct hw_perf_counter *hwc = &counter->hw;
- int idx;
- idx = fixed_mode_idx(counter, hwc);
- if (idx >= 0) {
- /*
- * Try to get the fixed counter, if that is already taken
- * then try to get a generic counter:
- */
- if (test_and_set_bit(idx, cpuc->used))
- goto try_generic;
- hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
- /*
- * We set it so that counter_base + idx in wrmsr/rdmsr maps to
- * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
- */
- hwc->counter_base =
- MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
- hwc->idx = idx;
- } else {
- idx = hwc->idx;
- /* Try to get the previous generic counter again */
- if (test_and_set_bit(idx, cpuc->used)) {
- try_generic:
- idx = find_first_zero_bit(cpuc->used, nr_counters_generic);
- if (idx == nr_counters_generic)
- return -EAGAIN;
- set_bit(idx, cpuc->used);
- hwc->idx = idx;
- }
- hwc->config_base = pmc_ops->eventsel;
- hwc->counter_base = pmc_ops->perfctr;
- }
- perf_counters_lapic_init(hwc->nmi);
- __pmc_generic_disable(counter, hwc, idx);
- cpuc->counters[idx] = counter;
- /*
- * Make it visible before enabling the hw:
- */
- smp_wmb();
- __hw_perf_counter_set_period(counter, hwc, idx);
- __pmc_generic_enable(counter, hwc, idx);
- return 0;
- }
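- /*
- * Counter placement above: try the matching fixed-purpose counter first,
- * then the counter's previous generic slot, and finally the first free
- * generic slot; -EAGAIN is returned when everything is taken.
- */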
- void perf_counter_print_debug(void)
- {
- u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
- struct cpu_hw_counters *cpuc;
- int cpu, idx;
- if (!nr_counters_generic)
- return;
- local_irq_disable();
- cpu = smp_processor_id();
- cpuc = &per_cpu(cpu_hw_counters, cpu);
- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
- rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
- rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
- rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
- rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
- pr_info("\n");
- pr_info("CPU#%d: ctrl: %016llx\n", cpu, ctrl);
- pr_info("CPU#%d: status: %016llx\n", cpu, status);
- pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow);
- pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed);
- }
- pr_info("CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used);
- for (idx = 0; idx < nr_counters_generic; idx++) {
- rdmsrl(pmc_ops->eventsel + idx, pmc_ctrl);
- rdmsrl(pmc_ops->perfctr + idx, pmc_count);
- prev_left = per_cpu(prev_left[idx], cpu);
- pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n",
- cpu, idx, pmc_ctrl);
- pr_info("CPU#%d: gen-PMC%d count: %016llx\n",
- cpu, idx, pmc_count);
- pr_info("CPU#%d: gen-PMC%d left: %016llx\n",
- cpu, idx, prev_left);
- }
- for (idx = 0; idx < nr_counters_fixed; idx++) {
- rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
- pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
- cpu, idx, pmc_count);
- }
- local_irq_enable();
- }
- static void pmc_generic_disable(struct perf_counter *counter)
- {
- struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
- struct hw_perf_counter *hwc = &counter->hw;
- unsigned int idx = hwc->idx;
- __pmc_generic_disable(counter, hwc, idx);
- clear_bit(idx, cpuc->used);
- cpuc->counters[idx] = NULL;
- /*
- * Make sure the cleared pointer becomes visible before we
- * (potentially) free the counter:
- */
- smp_wmb();
- /*
- * Drain the remaining delta count out of a counter
- * that we are disabling:
- */
- x86_perf_counter_update(counter, hwc, idx);
- }
- static void perf_store_irq_data(struct perf_counter *counter, u64 data)
- {
- struct perf_data *irqdata = counter->irqdata;
- if (irqdata->len > PERF_DATA_BUFLEN - sizeof(u64)) {
- irqdata->overrun++;
- } else {
- u64 *p = (u64 *) &irqdata->data[irqdata->len];
- *p = data;
- irqdata->len += sizeof(u64);
- }
- }
- /*
- * Save and restart an expired counter. Called by NMI contexts,
- * so it has to be careful about preempting normal counter ops:
- */
- static void perf_save_and_restart(struct perf_counter *counter)
- {
- struct hw_perf_counter *hwc = &counter->hw;
- int idx = hwc->idx;
- x86_perf_counter_update(counter, hwc, idx);
- __hw_perf_counter_set_period(counter, hwc, idx);
- if (counter->state == PERF_COUNTER_STATE_ACTIVE)
- __pmc_generic_enable(counter, hwc, idx);
- }
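- /*
- * perf_save_and_restart() folds the expired period into counter->count
- * and re-programs the next period; the counter is only re-enabled if it
- * is still in the ACTIVE state, so a counter that has since been
- * disabled stays off.
- */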
- static void
- perf_handle_group(struct perf_counter *sibling, u64 *status, u64 *overflown)
- {
- struct perf_counter *counter, *group_leader = sibling->group_leader;
- /*
- * Store sibling counter types and counts (if any):
- */
- list_for_each_entry(counter, &group_leader->sibling_list, list_entry) {
- x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
- perf_store_irq_data(sibling, counter->hw_event.type);
- perf_store_irq_data(sibling, atomic64_read(&counter->count));
- }
- }
- /*
- * Maximum interrupt frequency of 100KHz per CPU
- */
- #define PERFMON_MAX_INTERRUPTS (100000/HZ)
- /*
- * This handler is triggered by the local APIC, so the APIC IRQ handling
- * rules apply:
- */
- static int __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi)
- {
- int bit, cpu = smp_processor_id();
- u64 ack, status;
- struct cpu_hw_counters *cpuc = &per_cpu(cpu_hw_counters, cpu);
- int ret = 0;
- cpuc->throttle_ctrl = hw_perf_save_disable();
- status = hw_perf_get_status(cpuc->throttle_ctrl);
- if (!status)
- goto out;
- ret = 1;
- again:
- inc_irq_stat(apic_perf_irqs);
- ack = status;
- for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
- struct perf_counter *counter = cpuc->counters[bit];
- clear_bit(bit, (unsigned long *) &status);
- if (!counter)
- continue;
- perf_save_and_restart(counter);
- switch (counter->hw_event.record_type) {
- case PERF_RECORD_SIMPLE:
- continue;
- case PERF_RECORD_IRQ:
- perf_store_irq_data(counter, instruction_pointer(regs));
- break;
- case PERF_RECORD_GROUP:
- perf_handle_group(counter, &status, &ack);
- break;
- }
- /*
- * From NMI context we cannot call into the scheduler to
- * do a task wakeup - instead we mark the counter as
- * wakeup_pending and initiate a wakeup callback:
- */
- if (nmi) {
- counter->wakeup_pending = 1;
- set_tsk_thread_flag(current, TIF_PERF_COUNTERS);
- } else {
- wake_up(&counter->waitq);
- }
- }
- hw_perf_ack_status(ack);
- /*
- * Repeat if there is more work to be done:
- */
- status = hw_perf_get_status(cpuc->throttle_ctrl);
- if (status)
- goto again;
- out:
- /*
- * Restore - do not reenable when global enable is off or throttled:
- */
- if (++cpuc->interrupts < PERFMON_MAX_INTERRUPTS)
- hw_perf_restore(cpuc->throttle_ctrl);
- return ret;
- }
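- /*
- * Throttling: each PMI increments cpuc->interrupts, and once
- * PERFMON_MAX_INTERRUPTS (100000/HZ) of them arrive before the count is
- * reset, the counters are left globally disabled; perf_counter_unthrottle()
- * below re-enables them and clears the count, presumably once per timer
- * tick given the 100000/HZ budget.
- */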
- void perf_counter_unthrottle(void)
- {
- struct cpu_hw_counters *cpuc;
- if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
- return;
- if (unlikely(!perf_counters_initialized))
- return;
- cpuc = &__get_cpu_var(cpu_hw_counters);
- if (cpuc->interrupts >= PERFMON_MAX_INTERRUPTS) {
- if (printk_ratelimit())
- printk(KERN_WARNING "PERFMON: max interrupts exceeded!\n");
- hw_perf_restore(cpuc->throttle_ctrl);
- }
- cpuc->interrupts = 0;
- }
- void smp_perf_counter_interrupt(struct pt_regs *regs)
- {
- irq_enter();
- apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
- ack_APIC_irq();
- __smp_perf_counter_interrupt(regs, 0);
- irq_exit();
- }
- /*
- * Process wakeups that were deferred from NMI context:
- */
- void perf_counter_notify(struct pt_regs *regs)
- {
- struct cpu_hw_counters *cpuc;
- unsigned long flags;
- int bit, cpu;
- local_irq_save(flags);
- cpu = smp_processor_id();
- cpuc = &per_cpu(cpu_hw_counters, cpu);
- for_each_bit(bit, cpuc->used, X86_PMC_IDX_MAX) {
- struct perf_counter *counter = cpuc->counters[bit];
- if (!counter)
- continue;
- if (counter->wakeup_pending) {
- counter->wakeup_pending = 0;
- wake_up(&counter->waitq);
- }
- }
- local_irq_restore(flags);
- }
- void perf_counters_lapic_init(int nmi)
- {
- u32 apic_val;
- if (!perf_counters_initialized)
- return;
- /*
- * Enable the performance counter vector in the APIC LVT:
- */
- apic_val = apic_read(APIC_LVTERR);
- apic_write(APIC_LVTERR, apic_val | APIC_LVT_MASKED);
- if (nmi)
- apic_write(APIC_LVTPC, APIC_DM_NMI);
- else
- apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
- apic_write(APIC_LVTERR, apic_val);
- }
- static int __kprobes
- perf_counter_nmi_handler(struct notifier_block *self,
- unsigned long cmd, void *__args)
- {
- struct die_args *args = __args;
- struct pt_regs *regs;
- int ret;
- switch (cmd) {
- case DIE_NMI:
- case DIE_NMI_IPI:
- break;
- default:
- return NOTIFY_DONE;
- }
- regs = args->regs;
- apic_write(APIC_LVTPC, APIC_DM_NMI);
- ret = __smp_perf_counter_interrupt(regs, 1);
- return ret ? NOTIFY_STOP : NOTIFY_OK;
- }
- static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
- .notifier_call = perf_counter_nmi_handler,
- .next = NULL,
- .priority = 1
- };
- static struct pmc_x86_ops pmc_intel_ops = {
- .save_disable_all = pmc_intel_save_disable_all,
- .restore_all = pmc_intel_restore_all,
- .get_status = pmc_intel_get_status,
- .ack_status = pmc_intel_ack_status,
- .enable = pmc_intel_enable,
- .disable = pmc_intel_disable,
- .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
- .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
- .event_map = pmc_intel_event_map,
- .raw_event = pmc_intel_raw_event,
- .max_events = ARRAY_SIZE(intel_perfmon_event_map),
- };
- static struct pmc_x86_ops pmc_amd_ops = {
- .save_disable_all = pmc_amd_save_disable_all,
- .restore_all = pmc_amd_restore_all,
- .get_status = pmc_amd_get_status,
- .ack_status = pmc_amd_ack_status,
- .enable = pmc_amd_enable,
- .disable = pmc_amd_disable,
- .eventsel = MSR_K7_EVNTSEL0,
- .perfctr = MSR_K7_PERFCTR0,
- .event_map = pmc_amd_event_map,
- .raw_event = pmc_amd_raw_event,
- .max_events = ARRAY_SIZE(amd_perfmon_event_map),
- };
- static struct pmc_x86_ops *pmc_intel_init(void)
- {
- union cpuid10_eax eax;
- unsigned int ebx;
- unsigned int unused;
- union cpuid10_edx edx;
- /*
- * Check whether the Architectural PerfMon supports
- * Branch Misses Retired Event or not.
- */
- cpuid(10, &eax.full, &ebx, &unused, &edx.full);
- if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
- return NULL;
- pr_info("Intel Performance Monitoring support detected.\n");
- pr_info("... version: %d\n", eax.split.version_id);
- pr_info("... bit width: %d\n", eax.split.bit_width);
- pr_info("... mask length: %d\n", eax.split.mask_length);
- nr_counters_generic = eax.split.num_counters;
- nr_counters_fixed = edx.split.num_counters_fixed;
- counter_value_mask = (1ULL << eax.split.bit_width) - 1;
- return &pmc_intel_ops;
- }
- static struct pmc_x86_ops *pmc_amd_init(void)
- {
- nr_counters_generic = 4;
- nr_counters_fixed = 0;
- counter_value_mask = 0x0000FFFFFFFFFFFFULL;
- counter_value_bits = 48;
- pr_info("AMD Performance Monitoring support detected.\n");
- return &pmc_amd_ops;
- }
- void __init init_hw_perf_counters(void)
- {
- if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
- return;
- switch (boot_cpu_data.x86_vendor) {
- case X86_VENDOR_INTEL:
- pmc_ops = pmc_intel_init();
- break;
- case X86_VENDOR_AMD:
- pmc_ops = pmc_amd_init();
- break;
- }
- if (!pmc_ops)
- return;
- pr_info("... num counters: %d\n", nr_counters_generic);
- if (nr_counters_generic > X86_PMC_MAX_GENERIC) {
- nr_counters_generic = X86_PMC_MAX_GENERIC;
- WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!",
- nr_counters_generic, X86_PMC_MAX_GENERIC);
- }
- perf_counter_mask = (1 << nr_counters_generic) - 1;
- perf_max_counters = nr_counters_generic;
- pr_info("... value mask: %016Lx\n", counter_value_mask);
- if (nr_counters_fixed > X86_PMC_MAX_FIXED) {
- nr_counters_fixed = X86_PMC_MAX_FIXED;
- WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!",
- nr_counters_fixed, X86_PMC_MAX_FIXED);
- }
- pr_info("... fixed counters: %d\n", nr_counters_fixed);
- perf_counter_mask |= ((1LL << nr_counters_fixed)-1) << X86_PMC_IDX_FIXED;
- pr_info("... counter mask: %016Lx\n", perf_counter_mask);
- perf_counters_initialized = true;
- perf_counters_lapic_init(0);
- register_die_notifier(&perf_counter_nmi_notifier);
- }
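- /*
- * After init, perf_counter_mask has one bit per usable counter: the low
- * nr_counters_generic bits cover the generic PMCs and, starting at
- * X86_PMC_IDX_FIXED, one bit per fixed-purpose counter.
- */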
- static void pmc_generic_read(struct perf_counter *counter)
- {
- x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
- }
- static const struct hw_perf_counter_ops x86_perf_counter_ops = {
- .enable = pmc_generic_enable,
- .disable = pmc_generic_disable,
- .read = pmc_generic_read,
- };
- const struct hw_perf_counter_ops *
- hw_perf_counter_init(struct perf_counter *counter)
- {
- int err;
- err = __hw_perf_counter_init(counter);
- if (err)
- return NULL;
- return &x86_perf_counter_ops;
- }