@@ -740,11 +740,13 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
 }
 
 /*
- * Clean up after the prior grace period and let rcu_start_gp() start up
- * the next grace period if one is needed.  Note that the caller must
- * hold rnp->lock, as required by rcu_start_gp(), which will release it.
+ * Report a full set of quiescent states to the specified rcu_state
+ * data structure.  This involves cleaning up after the prior grace
+ * period and letting rcu_start_gp() start up the next grace period
+ * if one is needed.  Note that the caller must hold rnp->lock, as
+ * required by rcu_start_gp(), which will release it.
  */
-static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags)
+static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
 	__releases(rcu_get_root(rsp)->lock)
 {
 	WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
@@ -754,15 +756,16 @@ static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags)
 }
 
 /*
- * Similar to cpu_quiet(), for which it is a helper function.  Allows
- * a group of CPUs to be quieted at one go, though all the CPUs in the
- * group must be represented by the same leaf rcu_node structure.
- * That structure's lock must be held upon entry, and it is released
- * before return.
+ * Similar to rcu_report_qs_rdp(), for which it is a helper function.
+ * Allows quiescent states for a group of CPUs to be reported at one go
+ * to the specified rcu_node structure, though all the CPUs in the group
+ * must be represented by the same rcu_node structure (which need not be
+ * a leaf rcu_node structure, though it often will be).  That structure's
+ * lock must be held upon entry, and it is released before return.
  */
 static void
-cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp,
-	      unsigned long flags)
+rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
+		  struct rcu_node *rnp, unsigned long flags)
 	__releases(rnp->lock)
 {
 	struct rcu_node *rnp_c;
@@ -798,21 +801,23 @@ cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp,
 
 	/*
 	 * Get here if we are the last CPU to pass through a quiescent
-	 * state for this grace period.  Invoke cpu_quiet_msk_finish()
+	 * state for this grace period.  Invoke rcu_report_qs_rsp()
 	 * to clean up and start the next grace period if one is needed.
 	 */
-	cpu_quiet_msk_finish(rsp, flags); /* releases rnp->lock. */
+	rcu_report_qs_rsp(rsp, flags); /* releases rnp->lock. */
 }
 
 /*
- * Record a quiescent state for the specified CPU, which must either be
- * the current CPU.  The lastcomp argument is used to make sure we are
- * still in the grace period of interest.  We don't want to end the current
- * grace period based on quiescent states detected in an earlier grace
- * period!
+ * Record a quiescent state for the specified CPU to that CPU's rcu_data
+ * structure.  This must be either called from the specified CPU, or
+ * called when the specified CPU is known to be offline (and when it is
+ * also known that no other CPU is concurrently trying to help the offline
+ * CPU).  The lastcomp argument is used to make sure we are still in the
+ * grace period of interest.  We don't want to end the current grace period
+ * based on quiescent states detected in an earlier grace period!
  */
 static void
-cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp)
+rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp)
 {
 	unsigned long flags;
 	unsigned long mask;
@@ -827,8 +832,8 @@ cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp)
 		 * The race with GP start is resolved by the fact that we
 		 * hold the leaf rcu_node lock, so that the per-CPU bits
 		 * cannot yet be initialized -- so we would simply find our
-		 * CPU's bit already cleared in cpu_quiet_msk() if this race
-		 * occurred.
+		 * CPU's bit already cleared in rcu_report_qs_rnp() if this
+		 * race occurred.
 		 */
 		rdp->passed_quiesc = 0;	/* try again later! */
 		spin_unlock_irqrestore(&rnp->lock, flags);
@@ -846,7 +851,7 @@ cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp)
 		 */
 		rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
 
-		cpu_quiet_msk(mask, rsp, rnp, flags); /* releases rnp->lock */
+		rcu_report_qs_rnp(mask, rsp, rnp, flags); /* rlses rnp->lock */
 	}
 }
 
@@ -877,8 +882,11 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
 	if (!rdp->passed_quiesc)
 		return;
 
-	/* Tell RCU we are done (but cpu_quiet() will be the judge of that). */
-	cpu_quiet(rdp->cpu, rsp, rdp, rdp->passed_quiesc_completed);
+	/*
+	 * Tell RCU we are done (but rcu_report_qs_rdp() will be the
+	 * judge of that).
+	 */
+	rcu_report_qs_rdp(rdp->cpu, rsp, rdp, rdp->passed_quiesc_completed);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -968,13 +976,13 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
 	/*
 	 * We still hold the leaf rcu_node structure lock here, and
	 * irqs are still disabled.  The reason for this subterfuge is
-	 * because invoking task_quiet() with ->onofflock held leads
-	 * to deadlock.
+	 * because invoking rcu_report_unblock_qs_rnp() with ->onofflock
+	 * held leads to deadlock.
	 */
 	spin_unlock(&rsp->onofflock); /* irqs remain disabled. */
 	rnp = rdp->mynode;
 	if (need_quiet)
-		task_quiet(rnp, flags);
+		rcu_report_unblock_qs_rnp(rnp, flags);
 	else
 		spin_unlock_irqrestore(&rnp->lock, flags);
 
@@ -1164,8 +1172,8 @@ static int rcu_process_dyntick(struct rcu_state *rsp, long lastcomp,
 		}
 		if (mask != 0 && rnp->completed == lastcomp) {
 
-			/* cpu_quiet_msk() releases rnp->lock. */
-			cpu_quiet_msk(mask, rsp, rnp, flags);
+			/* rcu_report_qs_rnp() releases rnp->lock. */
+			rcu_report_qs_rnp(mask, rsp, rnp, flags);
 			continue;
 		}
 		spin_unlock_irqrestore(&rnp->lock, flags);
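
For reference, a minimal user-space sketch of the reporting chain that the
renamed functions implement: a per-CPU report (rcu_report_qs_rdp) clears that
CPU's bit in its rcu_node structure (rcu_report_qs_rnp), and clearing the last
bit at the root reports a full set of quiescent states for the grace period
(rcu_report_qs_rsp).  The sketch is not taken from this patch or from
kernel/rcutree.c; the struct node tree, the report_qs_*() names, and the
two-level layout are illustrative assumptions, and all locking is omitted.

/*
 * Illustrative sketch only: propagate quiescent-state bits up a small
 * node tree and declare the grace period complete when the root empties.
 */
#include <stdio.h>

#define NR_CPUS		4
#define CPUS_PER_LEAF	2
#define NR_LEAVES	(NR_CPUS / CPUS_PER_LEAF)

struct node {
	unsigned long qsmask;	/* CPUs/children still needing to report. */
	struct node *parent;	/* NULL for the root node. */
	unsigned long grpmask;	/* This node's bit in parent->qsmask. */
};

static struct node root;
static struct node leaves[NR_LEAVES];
static long gpnum;		/* Current grace-period number. */

/* Full set of quiescent states seen: end the current grace period. */
static void report_qs_state(void)
{
	printf("grace period %ld complete\n", gpnum);
	gpnum++;
}

/* Clear @mask in @np; propagate upward whenever a node's mask empties. */
static void report_qs_node(struct node *np, unsigned long mask)
{
	while (np) {
		np->qsmask &= ~mask;
		if (np->qsmask)
			return;		/* Other CPUs still owe a report. */
		mask = np->grpmask;	/* Node done; clear its bit above. */
		np = np->parent;
	}
	report_qs_state();		/* Root mask emptied. */
}

/* Per-CPU entry point: report that @cpu passed a quiescent state. */
static void report_qs_cpu(int cpu)
{
	struct node *leaf = &leaves[cpu / CPUS_PER_LEAF];

	report_qs_node(leaf, 1UL << (cpu % CPUS_PER_LEAF));
}

int main(void)
{
	int i;

	root.qsmask = (1UL << NR_LEAVES) - 1;
	for (i = 0; i < NR_LEAVES; i++) {
		leaves[i].qsmask = (1UL << CPUS_PER_LEAF) - 1;
		leaves[i].parent = &root;
		leaves[i].grpmask = 1UL << i;
	}
	for (i = 0; i < NR_CPUS; i++)
		report_qs_cpu(i);	/* Last report ends the grace period. */
	return 0;
}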