@@ -1,4 +1,5 @@
 #include <linux/perf_event.h>
+#include <linux/export.h>
 #include <linux/types.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -357,7 +358,9 @@ static void amd_pmu_cpu_starting(int cpu)
 	struct amd_nb *nb;
 	int i, nb_id;
 
-	if (boot_cpu_data.x86_max_cores < 2)
+	cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;
+
+	if (boot_cpu_data.x86_max_cores < 2 || boot_cpu_data.x86 == 0x15)
 		return;
 
 	nb_id = amd_get_nb_id(cpu);
@@ -587,9 +590,9 @@ static __initconst const struct x86_pmu amd_pmu_f15h = {
 	.put_event_constraints	= amd_put_event_constraints,
 
 	.cpu_prepare		= amd_pmu_cpu_prepare,
-	.cpu_starting		= amd_pmu_cpu_starting,
 	.cpu_dead		= amd_pmu_cpu_dead,
 #endif
+	.cpu_starting		= amd_pmu_cpu_starting,
 };
 
 __init int amd_pmu_init(void)
@@ -621,3 +624,33 @@ __init int amd_pmu_init(void)
 
 	return 0;
 }
+
+void amd_pmu_enable_virt(void)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+	cpuc->perf_ctr_virt_mask = 0;
+
+	/* Reload all events */
+	x86_pmu_disable_all();
+	x86_pmu_enable_all(0);
+}
+EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);
+
+void amd_pmu_disable_virt(void)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+	/*
+	 * We only mask out the Host-only bit so that host-only counting works
+	 * when SVM is disabled. If someone sets up a guest-only counter while
+	 * SVM is disabled, the Guest-only bit still gets set and the counter
+	 * will not count anything.
+	 */
+	cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;
+
+	/* Reload all events */
+	x86_pmu_disable_all();
+	x86_pmu_enable_all(0);
+}
+EXPORT_SYMBOL_GPL(amd_pmu_disable_virt);
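
For readers following the Host-only/Guest-only logic above, here is a small standalone sketch (not part of the patch) of how the new per-CPU perf_ctr_virt_mask is meant to behave: while SVM is disabled, the Host-only bit is stripped at the point where the event-select value is programmed, so host-only events keep counting, whereas a guest-only event keeps its bit and simply counts nothing. The write_evtsel() stub, the simplified cpu_hw_events_sketch struct, and the 0x76 example event are illustrative assumptions, not the kernel's actual interfaces; in the kernel the mask would be applied wherever the event-select MSR is written.

/*
 * Standalone sketch, not kernel code: models how the per-CPU
 * perf_ctr_virt_mask introduced above affects the value that ends up in an
 * AMD event-select register. All names except the EVENTSEL bit macros are
 * illustrative stand-ins.
 */
#include <stdint.h>
#include <stdio.h>

#define AMD_PERFMON_EVENTSEL_GUESTONLY	(1ULL << 40)	/* count only while a guest runs */
#define AMD_PERFMON_EVENTSEL_HOSTONLY	(1ULL << 41)	/* count only while the host runs */

struct cpu_hw_events_sketch {
	uint64_t perf_ctr_virt_mask;	/* bits cleared before programming EVTSEL */
};

/* Stand-in for the wrmsrl() that would program the event-select MSR. */
static void write_evtsel(uint64_t val)
{
	printf("EVTSEL <- 0x%016llx\n", (unsigned long long)val);
}

/* Mirrors amd_pmu_disable_virt(): with SVM off, strip only the Host-only bit. */
static void sketch_disable_virt(struct cpu_hw_events_sketch *cpuc)
{
	cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;
}

/* Mirrors amd_pmu_enable_virt(): with SVM on, let both bits pass through. */
static void sketch_enable_virt(struct cpu_hw_events_sketch *cpuc)
{
	cpuc->perf_ctr_virt_mask = 0;
}

/* The mask is applied at the point where the counter is (re)programmed. */
static void sketch_program_event(struct cpu_hw_events_sketch *cpuc, uint64_t config)
{
	write_evtsel(config & ~cpuc->perf_ctr_virt_mask);
}

int main(void)
{
	struct cpu_hw_events_sketch cpuc = { 0 };
	/* 0x76 is used only as an arbitrary example event code with the Host-only bit set. */
	uint64_t host_only_event = 0x76ULL | AMD_PERFMON_EVENTSEL_HOSTONLY;

	sketch_disable_virt(&cpuc);		/* SVM disabled: Host-only bit is masked out */
	sketch_program_event(&cpuc, host_only_event);

	sketch_enable_virt(&cpuc);		/* SVM enabled: the bit is written as-is */
	sketch_program_event(&cpuc, host_only_event);

	return 0;
}

Elsewhere in the series, amd_pmu_enable_virt()/amd_pmu_disable_virt() would presumably be called by the SVM host code when virtualization is switched on or off for a CPU; that caller is outside the hunks shown here.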