@@ -626,7 +626,7 @@ static bool __perf_sched_find_counter(struct perf_sched *sched)
 	c = sched->constraints[sched->state.event];
 
 	/* Prefer fixed purpose counters */
-	if (x86_pmu.num_counters_fixed) {
+	if (c->idxmsk64 & (~0ULL << X86_PMC_IDX_FIXED)) {
 		idx = X86_PMC_IDX_FIXED;
 		for_each_set_bit_from(idx, c->idxmsk, X86_PMC_IDX_MAX) {
 			if (!__test_and_set_bit(idx, sched->state.used))
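
The first hunk replaces a global PMU property test (x86_pmu.num_counters_fixed) with a per-event one: the fixed-counter scan now runs only when the event's own constraint mask allows an index at or above X86_PMC_IDX_FIXED, which also removes the scheduler's reference to x86_pmu at this point. A minimal userspace sketch of the new predicate, assuming X86_PMC_IDX_FIXED is 32 as in the x86 perf headers; allows_fixed_counter() and the sample masks below are illustrative, not from the patch:

#include <stdio.h>
#include <stdint.h>

#define X86_PMC_IDX_FIXED 32	/* first fixed-counter index (assumed, per the x86 headers) */

/* Illustrative helper: the constraint allows a fixed counter iff any bit
 * of its 64-bit index mask is set at or above X86_PMC_IDX_FIXED. */
static int allows_fixed_counter(uint64_t idxmsk64)
{
	return (idxmsk64 & (~0ULL << X86_PMC_IDX_FIXED)) != 0;
}

int main(void)
{
	uint64_t gp_only = 0xfULL;				/* GP counters 0..3 only */
	uint64_t fixed1  = 1ULL << (X86_PMC_IDX_FIXED + 1);	/* fixed counter 1 */

	printf("gp_only -> %d\n", allows_fixed_counter(gp_only));	/* prints 0 */
	printf("fixed1  -> %d\n", allows_fixed_counter(fixed1));	/* prints 1 */
	return 0;
}
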
@@ -693,8 +693,8 @@ static bool perf_sched_next_event(struct perf_sched *sched)
 /*
  * Assign a counter for each event.
  */
-static int perf_assign_events(struct event_constraint **constraints, int n,
-			int wmin, int wmax, int *assign)
+int perf_assign_events(struct event_constraint **constraints, int n,
+			int wmin, int wmax, int *assign)
 {
 	struct perf_sched sched;
 
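
The second hunk is export-only: dropping `static` makes perf_assign_events() callable from other files with the same contract, where constraints[] holds one constraint per event, wmin/wmax bound the constraint weights, and assign[] receives the chosen counter index per event. For that to compile, callers need a visible prototype; a sketch of what that looks like, assuming the series adds the declaration to a header shared with the new callers (the header is not part of this hunk, and the 0-on-full-success return convention is an assumption drawn from the scheduler's use elsewhere, not from these lines):

struct event_constraint;

/* Prototype now required outside this file, since the definition lost
 * `static`. Where it is declared is an assumption; this hunk only changes
 * the definition. */
int perf_assign_events(struct event_constraint **constraints, int n,
		       int wmin, int wmax, int *assign);

/* Hypothetical external caller: treats a return of 0 as "every one of the
 * n events was given a counter index in assign[]". */
static inline int try_schedule(struct event_constraint **c, int n,
			       int wmin, int wmax, int *assign)
{
	return perf_assign_events(c, n, wmin, wmax, assign);
}
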