@@ -410,7 +410,7 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
 		 * then we need to report up the rcu_node hierarchy.
 		 */
 		if (!empty_exp && empty_exp_now)
-			rcu_report_exp_rnp(&rcu_preempt_state, rnp);
+			rcu_report_exp_rnp(&rcu_preempt_state, rnp, true);
 	} else {
 		local_irq_restore(flags);
 	}
@@ -732,9 +732,13 @@ static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
  * recursively up the tree. (Calm down, calm down, we do the recursion
  * iteratively!)
  *
+ * Most callers will set the "wake" flag, but the task initiating the
+ * expedited grace period need not wake itself.
+ *
  * Caller must hold sync_rcu_preempt_exp_mutex.
  */
-static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
+static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
+			       bool wake)
 {
 	unsigned long flags;
 	unsigned long mask;
@@ -747,7 +751,8 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
 		}
 		if (rnp->parent == NULL) {
 			raw_spin_unlock_irqrestore(&rnp->lock, flags);
-			wake_up(&sync_rcu_preempt_exp_wq);
+			if (wake)
+				wake_up(&sync_rcu_preempt_exp_wq);
 			break;
 		}
 		mask = rnp->grpmask;
@@ -780,7 +785,7 @@ sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
 		must_wait = 1;
 	}
 	if (!must_wait)
-		rcu_report_exp_rnp(rsp, rnp);
+		rcu_report_exp_rnp(rsp, rnp, false); /* Don't wake self. */
 }
 
 /*
@@ -1072,7 +1077,8 @@ EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
  * report on tasks preempted in RCU read-side critical sections during
  * expedited RCU grace periods.
  */
-static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
+static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
+			       bool wake)
 {
 	return;
 }