@@ -379,7 +379,7 @@ int x86_pmu_hw_config(struct perf_event *event)
 		int precise = 0;
 
 		/* Support for constant skid */
-		if (x86_pmu.pebs_active) {
+		if (x86_pmu.pebs_active && !x86_pmu.pebs_broken) {
 			precise++;
 
 			/* Support for IP fixup */
@@ -1650,13 +1650,20 @@ static void x86_pmu_flush_branch_stack(void)
 		x86_pmu.flush_branch_stack();
 }
 
+void perf_check_microcode(void)
+{
+	if (x86_pmu.check_microcode)
+		x86_pmu.check_microcode();
+}
+EXPORT_SYMBOL_GPL(perf_check_microcode);
+
 static struct pmu pmu = {
 	.pmu_enable		= x86_pmu_enable,
 	.pmu_disable		= x86_pmu_disable,
 
-	.attr_groups	= x86_pmu_attr_groups,
+	.attr_groups		= x86_pmu_attr_groups,
 
-	.event_init	= x86_pmu_event_init,
+	.event_init		= x86_pmu_event_init,
 
 	.add			= x86_pmu_add,
 	.del			= x86_pmu_del,
@@ -1664,11 +1671,11 @@ static struct pmu pmu = {
 	.stop			= x86_pmu_stop,
 	.read			= x86_pmu_read,
 
-	.start_txn	= x86_pmu_start_txn,
-	.cancel_txn	= x86_pmu_cancel_txn,
-	.commit_txn	= x86_pmu_commit_txn,
+	.start_txn		= x86_pmu_start_txn,
+	.cancel_txn		= x86_pmu_cancel_txn,
+	.commit_txn		= x86_pmu_commit_txn,
 
-	.event_idx	= x86_pmu_event_idx,
+	.event_idx		= x86_pmu_event_idx,
 	.flush_branch_stack	= x86_pmu_flush_branch_stack,
 };
 
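The hunks above do two things: x86_pmu_hw_config() now refuses precise (PEBS)
events while x86_pmu.pebs_broken is set, and the exported perf_check_microcode()
hook lets the microcode loader ask the active PMU driver to re-evaluate that
flag after a late microcode load. For illustration, here is a minimal sketch of
what a driver-side check_microcode callback could look like; the helper
snb_pebs_broken(), the revision cutoff MIN_SNB_PEBS_REV, and the messages are
hypothetical and not part of this patch:

#include <linux/cpu.h>
#include <linux/printk.h>
#include <asm/processor.h>

/* Hypothetical minimum microcode revision that fixes the PEBS erratum. */
#define MIN_SNB_PEBS_REV	0x28

/* True if this CPU still runs microcode carrying the erratum. */
static int snb_pebs_broken(int cpu)
{
	return cpu_data(cpu).microcode < MIN_SNB_PEBS_REV;
}

/*
 * Called via x86_pmu.check_microcode from perf_check_microcode()
 * after the microcode loader has updated one or more CPUs.
 */
static void snb_check_microcode(void)
{
	int pebs_broken = 0;
	int cpu;

	/* PEBS stays off as long as any online CPU runs old microcode. */
	get_online_cpus();
	for_each_online_cpu(cpu)
		pebs_broken |= snb_pebs_broken(cpu);
	put_online_cpus();

	if (pebs_broken == x86_pmu.pebs_broken)
		return;

	if (pebs_broken)
		pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n");
	else
		pr_info("PEBS enabled due to microcode update\n");

	x86_pmu.pebs_broken = pebs_broken;
}

A driver would assign x86_pmu.check_microcode = snb_check_microcode; during its
init path; the microcode loader then calls the exported perf_check_microcode()
once an update has been applied, so x86_pmu_hw_config() starts (or stops)
rejecting precise events accordingly.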