@@ -1867,7 +1867,19 @@ int perf_event_release_kernel(struct perf_event *event)
 	event->state = PERF_EVENT_STATE_FREE;
 
 	WARN_ON_ONCE(ctx->parent_ctx);
-	mutex_lock(&ctx->mutex);
+	/*
+	 * There are two ways this annotation is useful:
+	 *
+	 *  1) there is a lock recursion from perf_event_exit_task
+	 *     see the comment there.
+	 *
+	 *  2) there is a lock-inversion with mmap_sem through
+	 *     perf_event_read_group(), which takes faults while
+	 *     holding ctx->mutex, however this is called after
+	 *     the last filedesc died, so there is no possibility
+	 *     to trigger the AB-BA case.
+	 */
+	mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
 	perf_event_remove_from_context(event);
 	mutex_unlock(&ctx->mutex);
 
@@ -5305,7 +5317,7 @@ void perf_event_exit_task(struct task_struct *child)
 	 *
 	 * But since its the parent context it won't be the same instance.
 	 */
-	mutex_lock_nested(&child_ctx->mutex, SINGLE_DEPTH_NESTING);
+	mutex_lock(&child_ctx->mutex);
 
 again:
 	list_for_each_entry_safe(child_event, tmp, &child_ctx->pinned_groups,
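
For readers who want to see the annotation in isolation, here is a minimal, hypothetical sketch (not part of the patch) of what mutex_lock_nested() and SINGLE_DEPTH_NESTING buy you. Because every perf_event_context's mutex is initialised from the same call site, all of them share one lockdep class; taking a second mutex of that class while already holding one looks like recursive locking to lockdep unless the inner acquisition is annotated with a subclass. The demo_ctx structure and the module below are invented for illustration; only the mutex and lockdep APIs are real, and the warning only fires on kernels built with CONFIG_PROVE_LOCKING.

/* demo_nesting.c - hypothetical module illustrating SINGLE_DEPTH_NESTING */
#include <linux/module.h>
#include <linux/mutex.h>

struct demo_ctx {			/* stand-in for perf_event_context */
	struct mutex mutex;
};

static struct demo_ctx ctx_a, ctx_b;

/*
 * Both mutexes are initialised through this one mutex_init() call site,
 * so lockdep puts them in the same lock class, just like the per-task
 * perf_event_context mutexes.
 */
static void demo_ctx_init(struct demo_ctx *ctx)
{
	mutex_init(&ctx->mutex);
}

static int __init demo_init(void)
{
	demo_ctx_init(&ctx_a);
	demo_ctx_init(&ctx_b);

	mutex_lock(&ctx_a.mutex);

	/*
	 * A plain mutex_lock(&ctx_b.mutex) here would make lockdep report
	 * "possible recursive locking detected", because it tracks lock
	 * classes, not instances.  The subclass annotation declares that
	 * this is a different instance, taken one level below the first,
	 * in a fixed and deadlock-free order.
	 */
	mutex_lock_nested(&ctx_b.mutex, SINGLE_DEPTH_NESTING);

	/* ... operate on both contexts ... */

	mutex_unlock(&ctx_b.mutex);
	mutex_unlock(&ctx_a.mutex);
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The hunks above move exactly this kind of annotation from the child context lock in perf_event_exit_task() to the context lock in perf_event_release_kernel(); the new comment records why both the recursion with the exit path and the mmap_sem inversion through perf_event_read_group() are harmless there.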