@@ -749,7 +749,8 @@ static void intel_pmu_enable_all(int added)
         intel_pmu_pebs_enable_all();
         intel_pmu_lbr_enable_all();
-        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
+        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
+                        x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
 
         if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
                 struct perf_event *event =
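
Note: the hunk above changes intel_pmu_enable_all() to mask guest-only counters out of the value written to MSR_CORE_PERF_GLOBAL_CTRL, so counters reserved for guest-mode-only events stay disabled while host code runs. A standalone illustration of the bit math, using made-up example values rather than anything from the patch: if counters 0 and 1 are in use and counter 1 belongs to an exclude_host event, intel_ctrl_guest_mask has bit 1 set and the host-side write enables counter 0 only.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Made-up example values mirroring the masking in
         * intel_pmu_enable_all() above. */
        uint64_t intel_ctrl = 0x3;      /* counters 0 and 1 are in use */
        uint64_t guest_mask = 0x2;      /* counter 1 is guest-only (exclude_host) */

        /* The host writes 0x1: counter 1 stays off while host code runs. */
        printf("host GLOBAL_CTRL = %#llx\n",
               (unsigned long long)(intel_ctrl & ~guest_mask));
        return 0;
}
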
@@ -872,6 +873,7 @@ static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
 static void intel_pmu_disable_event(struct perf_event *event)
 {
         struct hw_perf_event *hwc = &event->hw;
+        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
         if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
                 intel_pmu_disable_bts();
@@ -879,6 +881,9 @@ static void intel_pmu_disable_event(struct perf_event *event)
                 return;
         }
 
+        cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx);
+        cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
+
         if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
                 intel_pmu_disable_fixed(hwc);
                 return;
@@ -924,6 +929,7 @@ static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
 static void intel_pmu_enable_event(struct perf_event *event)
 {
         struct hw_perf_event *hwc = &event->hw;
+        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
         if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
                 if (!__this_cpu_read(cpu_hw_events.enabled))
@@ -933,6 +939,11 @@ static void intel_pmu_enable_event(struct perf_event *event)
                 return;
         }
 
+        if (event->attr.exclude_host)
+                cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx);
+        if (event->attr.exclude_guest)
+                cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx);
+
         if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
                 intel_pmu_enable_fixed(hwc);
                 return;
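
The exclude_host/exclude_guest attribute bits tested above are not defined in this diff; they are expected to come from the companion core perf change that adds them to struct perf_event_attr. As a hedged sketch (not part of this patch, and assuming that core change is applied), userspace could request host-only counting roughly like this:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

/* Illustrative sketch: open a per-CPU cycle counter that does not count
 * while guest (vcpu) code is running. */
static int open_host_only_cycles(int cpu)
{
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_CPU_CYCLES;
        attr.exclude_guest = 1;         /* host-only counting */

        /* pid = -1, cpu >= 0: count on one CPU, for all tasks */
        return syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
}

int main(void)
{
        int fd = open_host_only_cycles(0);

        if (fd < 0)
                perror("perf_event_open");
        return fd < 0 ? 1 : 0;
}
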
@@ -1302,12 +1313,84 @@ static int intel_pmu_hw_config(struct perf_event *event)
         return 0;
 }
 
+struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
+{
+        if (x86_pmu.guest_get_msrs)
+                return x86_pmu.guest_get_msrs(nr);
+        *nr = 0;
+        return NULL;
+}
+EXPORT_SYMBOL_GPL(perf_guest_get_msrs);
+
+static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
+{
+        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
+
+        arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
+        arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
+        arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;
+
+        *nr = 1;
+        return arr;
+}
+
+static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr)
+{
+        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
+        int idx;
+
+        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+                struct perf_event *event = cpuc->events[idx];
+
+                arr[idx].msr = x86_pmu_config_addr(idx);
+                arr[idx].host = arr[idx].guest = 0;
+
+                if (!test_bit(idx, cpuc->active_mask))
+                        continue;
+
+                arr[idx].host = arr[idx].guest =
+                        event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;
+
+                if (event->attr.exclude_host)
+                        arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
+                else if (event->attr.exclude_guest)
+                        arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
+        }
+
+        *nr = x86_pmu.num_counters;
+        return arr;
+}
+
+static void core_pmu_enable_event(struct perf_event *event)
+{
+        if (!event->attr.exclude_host)
+                x86_pmu_enable_event(event);
+}
+
+static void core_pmu_enable_all(int added)
+{
+        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        int idx;
+
+        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+                struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
+
+                if (!test_bit(idx, cpuc->active_mask) ||
+                                cpuc->events[idx]->attr.exclude_host)
+                        continue;
+
+                __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
+        }
+}
+
 static __initconst const struct x86_pmu core_pmu = {
         .name = "core",
         .handle_irq = x86_pmu_handle_irq,
         .disable_all = x86_pmu_disable_all,
-        .enable_all = x86_pmu_enable_all,
-        .enable = x86_pmu_enable_event,
+        .enable_all = core_pmu_enable_all,
+        .enable = core_pmu_enable_event,
         .disable = x86_pmu_disable_event,
         .hw_config = x86_pmu_hw_config,
         .schedule_events = x86_schedule_events,
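
The perf_guest_get_msrs() hook added above is meant to be called by a hypervisor around guest entry/exit: for each MSR it reports the value to load while the guest runs and the value to restore for the host (KVM's VMX code, for example, can feed these into its MSR auto-load/auto-store lists). A minimal sketch of such a consumer follows; add_atomic_switch_msr() and clear_atomic_switch_msr() are placeholders for the hypervisor's own switching mechanism and are not defined by this patch.

/* Hedged sketch of a hypervisor-side consumer of perf_guest_get_msrs().
 * The add/clear helpers below are placeholders, not functions from this
 * patch or from perf itself. */
static void switch_perf_msrs_for_guest_entry(void)
{
        int i, nr_msrs;
        struct perf_guest_switch_msr *msrs;

        msrs = perf_guest_get_msrs(&nr_msrs);
        if (!msrs)
                return;

        for (i = 0; i < nr_msrs; i++) {
                if (msrs[i].host == msrs[i].guest)
                        /* same value in both modes: nothing to switch */
                        clear_atomic_switch_msr(msrs[i].msr);
                else
                        /* load .guest on entry, restore .host on exit */
                        add_atomic_switch_msr(msrs[i].msr, msrs[i].guest,
                                              msrs[i].host);
        }
}
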
@@ -1325,6 +1408,7 @@ static __initconst const struct x86_pmu core_pmu = {
         .get_event_constraints = intel_get_event_constraints,
         .put_event_constraints = intel_put_event_constraints,
         .event_constraints = intel_core_event_constraints,
+        .guest_get_msrs = core_guest_get_msrs,
 };
 
 struct intel_shared_regs *allocate_shared_regs(int cpu)
@@ -1431,6 +1515,7 @@ static __initconst const struct x86_pmu intel_pmu = {
         .cpu_prepare = intel_pmu_cpu_prepare,
         .cpu_starting = intel_pmu_cpu_starting,
         .cpu_dying = intel_pmu_cpu_dying,
+        .guest_get_msrs = intel_guest_get_msrs,
 };
 
 static void intel_clovertown_quirks(void)