@@ -120,7 +120,7 @@ static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
 	list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
 		if (iter->hw.bp_target == tsk &&
 		    find_slot_idx(iter) == type &&
-		    cpu == iter->cpu)
+		    (iter->cpu < 0 || cpu == iter->cpu))
 			count += hw_breakpoint_weight(iter);
 	}

@@ -149,7 +149,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
 		return;
 	}

-	for_each_online_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		unsigned int nr;

 		nr = per_cpu(nr_cpu_bp_pinned[type], cpu);
@@ -235,7 +235,7 @@ toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
 	if (cpu >= 0) {
 		toggle_bp_task_slot(bp, cpu, enable, type, weight);
 	} else {
-		for_each_online_cpu(cpu)
+		for_each_possible_cpu(cpu)
 			toggle_bp_task_slot(bp, cpu, enable, type, weight);
 	}