@@ -63,7 +63,7 @@ u64 x86_perf_event_update(struct perf_event *event)
 	int idx = hwc->idx;
 	s64 delta;
 
-	if (idx == X86_PMC_IDX_FIXED_BTS)
+	if (idx == INTEL_PMC_IDX_FIXED_BTS)
 		return 0;
 
 	/*
@@ -626,8 +626,8 @@ static bool __perf_sched_find_counter(struct perf_sched *sched)
 	c = sched->constraints[sched->state.event];
 
 	/* Prefer fixed purpose counters */
-	if (c->idxmsk64 & (~0ULL << X86_PMC_IDX_FIXED)) {
-		idx = X86_PMC_IDX_FIXED;
+	if (c->idxmsk64 & (~0ULL << INTEL_PMC_IDX_FIXED)) {
+		idx = INTEL_PMC_IDX_FIXED;
 		for_each_set_bit_from(idx, c->idxmsk, X86_PMC_IDX_MAX) {
 			if (!__test_and_set_bit(idx, sched->state.used))
 				goto done;
@@ -635,7 +635,7 @@ static bool __perf_sched_find_counter(struct perf_sched *sched)
 	}
 	/* Grab the first unused counter starting with idx */
 	idx = sched->state.counter;
-	for_each_set_bit_from(idx, c->idxmsk, X86_PMC_IDX_FIXED) {
+	for_each_set_bit_from(idx, c->idxmsk, INTEL_PMC_IDX_FIXED) {
 		if (!__test_and_set_bit(idx, sched->state.used))
 			goto done;
 	}
@@ -813,13 +813,13 @@ static inline void x86_assign_hw_event(struct perf_event *event,
 	hwc->last_cpu = smp_processor_id();
 	hwc->last_tag = ++cpuc->tags[i];
 
-	if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
+	if (hwc->idx == INTEL_PMC_IDX_FIXED_BTS) {
 		hwc->config_base = 0;
 		hwc->event_base = 0;
-	} else if (hwc->idx >= X86_PMC_IDX_FIXED) {
+	} else if (hwc->idx >= INTEL_PMC_IDX_FIXED) {
 		hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
-		hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + (hwc->idx - X86_PMC_IDX_FIXED);
-		hwc->event_base_rdpmc = (hwc->idx - X86_PMC_IDX_FIXED) | 1<<30;
+		hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + (hwc->idx - INTEL_PMC_IDX_FIXED);
+		hwc->event_base_rdpmc = (hwc->idx - INTEL_PMC_IDX_FIXED) | 1<<30;
 	} else {
 		hwc->config_base = x86_pmu_config_addr(hwc->idx);
 		hwc->event_base  = x86_pmu_event_addr(hwc->idx);
@@ -921,7 +921,7 @@ int x86_perf_event_set_period(struct perf_event *event)
 	s64 period = hwc->sample_period;
 	int ret = 0, idx = hwc->idx;
 
-	if (idx == X86_PMC_IDX_FIXED_BTS)
+	if (idx == INTEL_PMC_IDX_FIXED_BTS)
 		return 0;
 
 	/*
@@ -1338,21 +1338,21 @@ static int __init init_hw_perf_events(void)
 	for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next)
 		quirk->func();
 
-	if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
+	if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
 		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
-		     x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
-		x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
+		     x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
+		x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
 	}
 	x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
 
-	if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
+	if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
 		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
-		     x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
-		x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
+		     x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
+		x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
 	}
 
 	x86_pmu.intel_ctrl |=
-		((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
+		((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;
 
 	perf_events_lapic_init();
 	register_nmi_handler(NMI_LOCAL, perf_event_nmi_handler, 0, "PMI");
@@ -1368,7 +1368,7 @@ static int __init init_hw_perf_events(void)
 		 */
 		for_each_event_constraint(c, x86_pmu.event_constraints) {
 			if (c->cmask != X86_RAW_EVENT_MASK
-			    || c->idxmsk64 == X86_PMC_MSK_FIXED_REF_CYCLES) {
+			    || c->idxmsk64 == INTEL_PMC_MSK_FIXED_REF_CYCLES) {
 				continue;
 			}
 
@@ -1611,8 +1611,8 @@ static int x86_pmu_event_idx(struct perf_event *event)
 	if (!x86_pmu.attr_rdpmc)
 		return 0;
 
-	if (x86_pmu.num_counters_fixed && idx >= X86_PMC_IDX_FIXED) {
-		idx -= X86_PMC_IDX_FIXED;
+	if (x86_pmu.num_counters_fixed && idx >= INTEL_PMC_IDX_FIXED) {
+		idx -= INTEL_PMC_IDX_FIXED;
 		idx |= 1 << 30;
 	}
 
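For context on the "1<<30" used in the event_base_rdpmc assignment and in
x86_pmu_event_idx() above: the kernel numbers fixed-function counters from
INTEL_PMC_IDX_FIXED (32) in its global index space, while the RDPMC
instruction selects a fixed counter by setting bit 30 of ECX with the fixed
counter number in the low bits. A rough user-space sketch of that encoding
(illustrative names, not part of this patch; assumes the counter is already
enabled and that rdpmc is permitted for user space):

#include <stdint.h>

/* RDPMC selects fixed-function counters via bit 30 of ECX; the low bits
 * give the fixed counter number (0 = INST_RETIRED.ANY, 1 = core cycles,
 * 2 = reference cycles). */
#define RDPMC_FIXED_BIT	(1u << 30)

static inline uint64_t rdpmc(uint32_t ecx)
{
	uint32_t lo, hi;

	asm volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (ecx));
	return lo | ((uint64_t)hi << 32);
}

/* Read fixed counter n; RDPMC faults with #GP if CR4.PCE disallows it
 * from user space. */
static inline uint64_t read_fixed_counter(unsigned int n)
{
	return rdpmc(n | RDPMC_FIXED_BIT);
}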