@@ -913,7 +913,20 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
 			spin_unlock(&rnp->lock); /* irqs remain disabled. */
 			break;
 		}
-		rcu_preempt_offline_tasks(rsp, rnp, rdp);
+
+		/*
+		 * If there was a task blocking the current grace period,
+		 * and if all CPUs have checked in, we need to propagate
+		 * the quiescent state up the rcu_node hierarchy. But that
+		 * is inconvenient at the moment due to deadlock issues if
+		 * this should end the current grace period. So set the
+		 * offlined CPU's bit in ->qsmask in order to force the
+		 * next force_quiescent_state() invocation to clean up this
+		 * mess in a deadlock-free manner.
+		 */
+		if (rcu_preempt_offline_tasks(rsp, rnp, rdp) && !rnp->qsmask)
+			rnp->qsmask |= mask;
+
 		mask = rnp->grpmask;
 		spin_unlock(&rnp->lock); /* irqs remain disabled. */
 		rnp = rnp->parent;
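The comment above describes a deferral: rather than reporting the final quiescent state while holding locks that could deadlock if the grace period ended right here, the offline path leaves the departing CPU's bit set in ->qsmask so that a later force_quiescent_state() scan finishes the job from a safe context. A minimal user-space sketch of that hand-off, with hypothetical simplified types (one rcu_node level, no locking):

	#include <stdbool.h>

	/* Hypothetical one-level model of the ->qsmask hand-off above. */
	struct model_node {
		unsigned long qsmask;	/* CPUs still owing a quiescent state */
	};

	/* Offline path: the departing CPU would have been the last holdout,
	 * but ending the grace period here is deadlock-prone, so re-mark it. */
	static void model_offline(struct model_node *rnp, unsigned long cpu_bit,
				  bool tasks_were_blocking)
	{
		if (tasks_were_blocking && rnp->qsmask == 0)
			rnp->qsmask |= cpu_bit;
	}

	/* Later, from force_quiescent_state()'s safe context: clear bits owed
	 * by offline CPUs; an empty mask means the grace period may now end. */
	static bool model_fqs_pass(struct model_node *rnp, unsigned long offline_bits)
	{
		rnp->qsmask &= ~offline_bits;
		return rnp->qsmask == 0;
	}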
@@ -958,7 +971,7 @@ static void rcu_offline_cpu(int cpu)
  * Invoke any RCU callbacks that have made it to the end of their grace
  * period. Throttle as specified by rdp->blimit.
  */
-static void rcu_do_batch(struct rcu_data *rdp)
+static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 {
 	unsigned long flags;
 	struct rcu_head *next, *list, **tail;
@@ -1011,6 +1024,13 @@ static void rcu_do_batch(struct rcu_data *rdp)
 	if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark)
 		rdp->blimit = blimit;
 
+	/* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
+	if (rdp->qlen == 0 && rdp->qlen_last_fqs_check != 0) {
+		rdp->qlen_last_fqs_check = 0;
+		rdp->n_force_qs_snap = rsp->n_force_qs;
+	} else if (rdp->qlen < rdp->qlen_last_fqs_check - qhimark)
+		rdp->qlen_last_fqs_check = rdp->qlen;
+
 	local_irq_restore(flags);
 
 	/* Re-raise the RCU softirq if there are callbacks remaining. */
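The two fields introduced here form a high-water mark with hysteresis: ->qlen_last_fqs_check remembers the queue length at the last point a forced quiescent state was weighed, and the drain side above clears or lowers it as callbacks are invoked. A rough stand-alone model of just that bookkeeping (field names reused for clarity; plain longs, no per-CPU machinery; qhimark's default value is assumed):

	/* Assumed default of the qhimark module parameter. */
	static const long qhimark = 10000;

	/* Hypothetical model of the per-CPU bookkeeping. */
	struct model_rdp {
		long qlen;			/* callbacks currently queued */
		long qlen_last_fqs_check;	/* qlen when fqs was last weighed */
		long n_force_qs_snap;		/* forced-QS count seen at that point */
	};

	/* Drain side, mirroring the rcu_do_batch() hunk above. */
	static void model_after_batch(struct model_rdp *rdp, long n_force_qs)
	{
		if (rdp->qlen == 0 && rdp->qlen_last_fqs_check != 0) {
			/* Fully drained: disarm the trigger. */
			rdp->qlen_last_fqs_check = 0;
			rdp->n_force_qs_snap = n_force_qs;
		} else if (rdp->qlen < rdp->qlen_last_fqs_check - qhimark) {
			/* Substantial drain: lower the watermark. */
			rdp->qlen_last_fqs_check = rdp->qlen;
		}
	}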
@@ -1224,7 +1244,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
 	}
 
 	/* If there are callbacks ready, invoke them. */
-	rcu_do_batch(rdp);
+	rcu_do_batch(rsp, rdp);
 }
 
 /*
@@ -1288,10 +1308,20 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 		rcu_start_gp(rsp, nestflag); /* releases rnp_root->lock. */
 	}
 
-	/* Force the grace period if too many callbacks or too long waiting. */
-	if (unlikely(++rdp->qlen > qhimark)) {
+	/*
+	 * Force the grace period if too many callbacks or too long waiting.
+	 * Enforce hysteresis, and don't invoke force_quiescent_state()
+	 * if some other CPU has recently done so. Also, don't bother
+	 * invoking force_quiescent_state() if the newly enqueued callback
+	 * is the only one waiting for a grace period to complete.
+	 */
+	if (unlikely(++rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
 		rdp->blimit = LONG_MAX;
-		force_quiescent_state(rsp, 0);
+		if (rsp->n_force_qs == rdp->n_force_qs_snap &&
+		    *rdp->nxttail[RCU_DONE_TAIL] != head)
+			force_quiescent_state(rsp, 0);
+		rdp->n_force_qs_snap = rsp->n_force_qs;
+		rdp->qlen_last_fqs_check = rdp->qlen;
 	} else if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)
 		force_quiescent_state(rsp, 1);
 	local_irq_restore(flags);
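On the enqueue side, the rewritten test now fires only once qlen has grown qhimark past the watermark, and even then skips force_quiescent_state() when some other CPU has forced one since our snapshot (rsp->n_force_qs moved) or when the newly enqueued callback is the only one outstanding. Continuing the hypothetical model above:

	/* Enqueue side, mirroring the __call_rcu() hunk above; bumping
	 * *n_force_qs stands in for calling force_quiescent_state(). */
	static void model_on_enqueue(struct model_rdp *rdp, long *n_force_qs,
				     bool only_callback)
	{
		if (++rdp->qlen > rdp->qlen_last_fqs_check + qhimark) {
			if (*n_force_qs == rdp->n_force_qs_snap && !only_callback)
				(*n_force_qs)++;
			rdp->n_force_qs_snap = *n_force_qs;
			rdp->qlen_last_fqs_check = rdp->qlen;
		}
	}

With the assumed qhimark of 10000, a CPU that floods 30000 callbacks triggers at most a few quiescent-state scans, rather than repeatedly invoking force_quiescent_state() on every enqueue past the old fixed threshold.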
@@ -1523,6 +1553,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
 	rdp->beenonline = 1;	 /* We have now been online. */
 	rdp->preemptable = preemptable;
 	rdp->passed_quiesc_completed = lastcomp - 1;
+	rdp->qlen_last_fqs_check = 0;
+	rdp->n_force_qs_snap = rsp->n_force_qs;
 	rdp->blimit = blimit;
 	spin_unlock(&rnp->lock);	/* irqs remain disabled. */
 