@@ -243,38 +243,70 @@ static void toggle_bp_slot(struct perf_event *bp, bool enable)
  *   ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *))
  *        + max(per_cpu(nr_task_bp_pinned, *))) < HBP_NUM
  */
-int reserve_bp_slot(struct perf_event *bp)
+static int __reserve_bp_slot(struct perf_event *bp)
 {
 	struct bp_busy_slots slots = {0};
-	int ret = 0;
-
-	mutex_lock(&nr_bp_mutex);
 
 	fetch_bp_busy_slots(&slots, bp);
 
 	/* Flexible counters need to keep at least one slot */
-	if (slots.pinned + (!!slots.flexible) == HBP_NUM) {
-		ret = -ENOSPC;
-		goto end;
-	}
+	if (slots.pinned + (!!slots.flexible) == HBP_NUM)
+		return -ENOSPC;
 
 	toggle_bp_slot(bp, true);
 
-end:
+	return 0;
+}
+
+int reserve_bp_slot(struct perf_event *bp)
+{
+	int ret;
+
+	mutex_lock(&nr_bp_mutex);
+
+	ret = __reserve_bp_slot(bp);
+
 	mutex_unlock(&nr_bp_mutex);
 
 	return ret;
 }
 
+static void __release_bp_slot(struct perf_event *bp)
+{
+	toggle_bp_slot(bp, false);
+}
+
 void release_bp_slot(struct perf_event *bp)
 {
 	mutex_lock(&nr_bp_mutex);
 
-	toggle_bp_slot(bp, false);
+	__release_bp_slot(bp);
 
 	mutex_unlock(&nr_bp_mutex);
 }
 
+/*
+ * Allow the kernel debugger to reserve breakpoint slots without
+ * taking a lock, using the dbg_* variants of the reserve and
+ * release breakpoint slot functions.
+ */
+int dbg_reserve_bp_slot(struct perf_event *bp)
+{
+	if (mutex_is_locked(&nr_bp_mutex))
+		return -1;
+
+	return __reserve_bp_slot(bp);
+}
+
+int dbg_release_bp_slot(struct perf_event *bp)
+{
+	if (mutex_is_locked(&nr_bp_mutex))
+		return -1;
+
+	__release_bp_slot(bp);
+
+	return 0;
+}
+
 int register_perf_hw_breakpoint(struct perf_event *bp)
 {
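
A brief usage sketch, not part of the patch above: the dbg_* helpers exist so that kernel-debugger code, which must not sleep on nr_bp_mutex, can still reserve and release breakpoint slots; a return of -1 means the mutex was held and the caller has to fail or retry rather than block. The debugger-side helper names below are hypothetical and assume only the declarations exported by <linux/hw_breakpoint.h> and <linux/perf_event.h>.

/* Hypothetical debugger-side helpers, for illustration only. */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>

static int debugger_install_hw_bp(struct perf_event *bp)
{
	/*
	 * dbg_reserve_bp_slot() returns nonzero instead of sleeping when
	 * nr_bp_mutex is already held; the debugger must not block here,
	 * so the failure is simply propagated to the caller.
	 */
	if (dbg_reserve_bp_slot(bp))
		return -EBUSY;

	/* Arch-specific code would now program the debug registers. */
	return 0;
}

static void debugger_remove_hw_bp(struct perf_event *bp)
{
	/* Same contract as above: -1 means the mutex was busy. */
	if (dbg_release_bp_slot(bp))
		printk(KERN_WARNING "hw_breakpoint: slot release skipped, mutex busy\n");
}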