@@ -29,9 +29,9 @@ static u64 perf_counter_mask __read_mostly;
 
 struct cpu_hw_counters {
 	struct perf_counter	*counters[X86_PMC_IDX_MAX];
 	unsigned long		used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+	unsigned long		active[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
 	unsigned long		interrupts;
 	u64			throttle_ctrl;
-	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
 	int			enabled;
 };
@@ -334,7 +334,7 @@ static u64 amd_pmu_save_disable_all(void)
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
 		u64 val;
 
-		if (!test_bit(idx, cpuc->active_mask))
+		if (!test_bit(idx, cpuc->active))
 			continue;
 		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
 		if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
@@ -376,7 +376,7 @@ static void amd_pmu_restore_all(u64 ctrl)
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
 		u64 val;
 
-		if (!test_bit(idx, cpuc->active_mask))
+		if (!test_bit(idx, cpuc->active))
 			continue;
 		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
 		if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
@@ -424,7 +424,7 @@ static void amd_pmu_enable_counter(int idx, u64 config)
 {
 	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
 
-	set_bit(idx, cpuc->active_mask);
+	set_bit(idx, cpuc->active);
 	if (cpuc->enabled)
 		config |= ARCH_PERFMON_EVENTSEL0_ENABLE;
 
@@ -448,7 +448,7 @@ static void amd_pmu_disable_counter(int idx, u64 config)
 {
 	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
 
-	clear_bit(idx, cpuc->active_mask);
+	clear_bit(idx, cpuc->active);
 	wrmsrl(MSR_K7_EVNTSEL0 + idx, config);
 
 }
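
For reference, the bitmap bookkeeping pattern behind cpuc->active can be sketched in plain C. This is a minimal userspace approximation, not the kernel implementation: BITS_TO_LONGS and the set_bit/clear_bit/test_bit helpers are re-implemented here (the kernel's versions live in <linux/bitops.h> and take a volatile pointer), and struct cpu_hw_counters is reduced to its two bitmaps.

/*
 * Illustrative userspace approximation of the cpuc->active bookkeeping
 * used in the patch above; helpers are local stand-ins for the kernel's
 * bitops, not the real API.
 */
#include <stdio.h>
#include <limits.h>

#define X86_PMC_IDX_MAX		64
#define BITS_PER_LONG		(sizeof(unsigned long) * CHAR_BIT)
#define BITS_TO_LONGS(nr)	(((nr) + BITS_PER_LONG - 1) / BITS_PER_LONG)

struct cpu_hw_counters {
	unsigned long	used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long	active[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
};

static void set_bit(int nr, unsigned long *addr)
{
	addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static void clear_bit(int nr, unsigned long *addr)
{
	addr[nr / BITS_PER_LONG] &= ~(1UL << (nr % BITS_PER_LONG));
}

static int test_bit(int nr, const unsigned long *addr)
{
	return (addr[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
}

int main(void)
{
	struct cpu_hw_counters cpuc = { { 0 }, { 0 } };
	int idx = 3;

	set_bit(idx, cpuc.active);		/* counter 3 marked active */
	printf("active(%d) = %d\n", idx, test_bit(idx, cpuc.active));
	clear_bit(idx, cpuc.active);		/* counter 3 marked inactive */
	printf("active(%d) = %d\n", idx, test_bit(idx, cpuc.active));
	return 0;
}

The enable/disable paths in the patch follow the same shape: set_bit on enable, clear_bit on disable, and test_bit in the loops that walk all num_counters counters so that inactive counters are skipped.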