@@ -111,14 +111,16 @@ static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
  * Count the number of breakpoints of the same type and same task.
  * The given event must be not on the list.
  */
-static int task_bp_pinned(struct perf_event *bp, enum bp_type_idx type)
+static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
 {
 	struct task_struct *tsk = bp->hw.bp_target;
 	struct perf_event *iter;
 	int count = 0;
 
 	list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
-		if (iter->hw.bp_target == tsk && find_slot_idx(iter) == type)
+		if (iter->hw.bp_target == tsk &&
+		    find_slot_idx(iter) == type &&
+		    cpu == iter->cpu)
 			count += hw_breakpoint_weight(iter);
 	}
 
@@ -141,7 +143,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
 		if (!tsk)
 			slots->pinned += max_task_bp_pinned(cpu, type);
 		else
-			slots->pinned += task_bp_pinned(bp, type);
+			slots->pinned += task_bp_pinned(cpu, bp, type);
 		slots->flexible = per_cpu(nr_bp_flexible[type], cpu);
 
 		return;
@@ -154,7 +156,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
 		if (!tsk)
 			nr += max_task_bp_pinned(cpu, type);
 		else
-			nr += task_bp_pinned(bp, type);
+			nr += task_bp_pinned(cpu, bp, type);
 
 		if (nr > slots->pinned)
 			slots->pinned = nr;
@@ -188,7 +190,7 @@ static void toggle_bp_task_slot(struct perf_event *bp, int cpu, bool enable,
 	int old_idx = 0;
 	int idx = 0;
 
-	old_count = task_bp_pinned(bp, type);
+	old_count = task_bp_pinned(cpu, bp, type);
 	old_idx = old_count - 1;
 	idx = old_idx + weight;
 
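
For reference, a sketch of how task_bp_pinned() reads once the first hunk is applied, reassembled from the '+' and context lines above. The closing return statement lies outside the hunk and is assumed here from the function's int return type and the count accumulator:

/*
 * Sketch of task_bp_pinned() after the first hunk is applied, reassembled
 * from the '+' and context lines.  The trailing return is an assumption
 * (the hunk ends before it), inferred from the int return type.
 */
static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
{
	struct task_struct *tsk = bp->hw.bp_target;
	struct perf_event *iter;
	int count = 0;

	/* Walk every task-bound breakpoint in the system... */
	list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
		/*
		 * ...and count only those bound to the same task, of the same
		 * slot type, and now also pinned to the same CPU as @bp.
		 */
		if (iter->hw.bp_target == tsk &&
		    find_slot_idx(iter) == type &&
		    cpu == iter->cpu)
			count += hw_breakpoint_weight(iter);
	}

	return count;	/* assumed; not part of the hunk */
}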