@@ -947,8 +947,18 @@ perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
 {
 	struct perf_event_context *ctx;
 
-	rcu_read_lock();
 retry:
+	/*
+	 * One of the few rules of preemptible RCU is that one cannot do
+	 * rcu_read_unlock() while holding a scheduler (or nested) lock when
+	 * part of the read side critical section was preemptible -- see
+	 * rcu_read_unlock_special().
+	 *
+	 * Since ctx->lock nests under rq->lock we must ensure the entire read
+	 * side critical section is non-preemptible.
+	 */
+	preempt_disable();
+	rcu_read_lock();
 	ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
 	if (ctx) {
 		/*
@@ -964,6 +974,8 @@ retry:
 		raw_spin_lock_irqsave(&ctx->lock, *flags);
 		if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
 			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
+			rcu_read_unlock();
+			preempt_enable();
 			goto retry;
 		}
 
@@ -973,6 +985,7 @@ retry:
 		}
 	}
 	rcu_read_unlock();
+	preempt_enable();
 	return ctx;
 }
 
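For reference, a minimal sketch (not part of the patch; the struct, field and
function names below are hypothetical) of the locking pattern the new comment
describes: the whole RCU read-side critical section is made non-preemptible, so
rcu_read_unlock() can never have to take the rcu_read_unlock_special() path
while a lock that nests under rq->lock is still held.

#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct my_obj {
	raw_spinlock_t lock;		/* assumed to nest under rq->lock */
	/* ... */
};

struct my_obj __rcu *my_obj_ptr;	/* hypothetical RCU-protected pointer */

static struct my_obj *my_obj_lock(unsigned long *flags)
{
	struct my_obj *obj;

	/*
	 * Disable preemption for the entire read side so that the cheap
	 * rcu_read_unlock() path is guaranteed, even though obj->lock is
	 * still held when we leave the RCU critical section below.
	 */
	preempt_disable();
	rcu_read_lock();

	obj = rcu_dereference(my_obj_ptr);
	if (obj)
		raw_spin_lock_irqsave(&obj->lock, *flags);

	rcu_read_unlock();		/* safe: section was never preempted */
	preempt_enable();

	/*
	 * Like perf_lock_task_context(), the object is returned with its
	 * lock held; the real function additionally re-checks the pointer
	 * under the lock and retries if it changed.
	 */
	return obj;
}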