@@ -25,7 +25,7 @@
  * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
  *
  * For detailed explanation of Read-Copy Update mechanism see -
- * Documentation/RCU
+ * Documentation/RCU
  */
 #include <linux/types.h>
 #include <linux/kernel.h>
@@ -107,27 +107,23 @@ static void __cpuinit rcu_init_percpu_data(int cpu, struct rcu_state *rsp,
  */
 void rcu_sched_qs(int cpu)
 {
-	unsigned long flags;
 	struct rcu_data *rdp;

-	local_irq_save(flags);
 	rdp = &per_cpu(rcu_sched_data, cpu);
-	rdp->passed_quiesc = 1;
 	rdp->passed_quiesc_completed = rdp->completed;
-	rcu_preempt_qs(cpu);
-	local_irq_restore(flags);
+	barrier();
+	rdp->passed_quiesc = 1;
+	rcu_preempt_note_context_switch(cpu);
 }

 void rcu_bh_qs(int cpu)
 {
-	unsigned long flags;
 	struct rcu_data *rdp;

-	local_irq_save(flags);
 	rdp = &per_cpu(rcu_bh_data, cpu);
-	rdp->passed_quiesc = 1;
 	rdp->passed_quiesc_completed = rdp->completed;
-	local_irq_restore(flags);
+	barrier();
+	rdp->passed_quiesc = 1;
 }

 #ifdef CONFIG_NO_HZ
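
The hunk above drops the irq disabling in favor of a compiler barrier: the
passed_quiesc_completed snapshot is recorded first, barrier() keeps the
compiler from reordering the two plain stores, and only then is the
passed_quiesc flag set. Below is a standalone user-space sketch of that
record-then-flag idiom; it is illustrative only, not kernel code, and the
struct and function names (qs_record, note_quiescent_state) are invented.

#include <stdio.h>

#define barrier() __asm__ __volatile__("" : : : "memory")

struct qs_record {
	long completed_snapshot;	/* plays the role of passed_quiesc_completed */
	int passed;			/* plays the role of passed_quiesc */
};

static struct qs_record rec;

static void note_quiescent_state(long completed)
{
	rec.completed_snapshot = completed;	/* record the snapshot first... */
	barrier();	/* ...forbid compiler reordering of the two stores... */
	rec.passed = 1;				/* ...then publish the flag. */
}

int main(void)
{
	note_quiescent_state(42);
	printf("passed=%d snapshot=%ld\n", rec.passed, rec.completed_snapshot);
	return 0;
}
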
@@ -605,8 +601,6 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
 {
 	struct rcu_data *rdp = rsp->rda[smp_processor_id()];
 	struct rcu_node *rnp = rcu_get_root(rsp);
-	struct rcu_node *rnp_cur;
-	struct rcu_node *rnp_end;

 	if (!cpu_needs_another_gp(rsp, rdp)) {
 		spin_unlock_irqrestore(&rnp->lock, flags);
@@ -615,6 +609,7 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)

 	/* Advance to a new grace period and initialize state. */
 	rsp->gpnum++;
+	WARN_ON_ONCE(rsp->signaled == RCU_GP_INIT);
 	rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */
 	rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
 	record_gp_stall_check_time(rsp);
@@ -631,7 +626,9 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)

 	/* Special-case the common single-level case. */
 	if (NUM_RCU_NODES == 1) {
+		rcu_preempt_check_blocked_tasks(rnp);
 		rnp->qsmask = rnp->qsmaskinit;
+		rnp->gpnum = rsp->gpnum;
 		rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */
 		spin_unlock_irqrestore(&rnp->lock, flags);
 		return;
@@ -644,42 +641,28 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
 	spin_lock(&rsp->onofflock);  /* irqs already disabled. */

 	/*
-	 * Set the quiescent-state-needed bits in all the non-leaf RCU
-	 * nodes for all currently online CPUs.  This operation relies
-	 * on the layout of the hierarchy within the rsp->node[] array.
-	 * Note that other CPUs will access only the leaves of the
-	 * hierarchy, which still indicate that no grace period is in
-	 * progress.  In addition, we have excluded CPU-hotplug operations.
-	 *
-	 * We therefore do not need to hold any locks.  Any required
-	 * memory barriers will be supplied by the locks guarding the
-	 * leaf rcu_nodes in the hierarchy.
-	 */
-
-	rnp_end = rsp->level[NUM_RCU_LVLS - 1];
-	for (rnp_cur = &rsp->node[0]; rnp_cur < rnp_end; rnp_cur++)
-		rnp_cur->qsmask = rnp_cur->qsmaskinit;
-
-	/*
-	 * Now set up the leaf nodes.  Here we must be careful.  First,
-	 * we need to hold the lock in order to exclude other CPUs, which
-	 * might be contending for the leaf nodes' locks.  Second, as
-	 * soon as we initialize a given leaf node, its CPUs might run
-	 * up the rest of the hierarchy.  We must therefore acquire locks
-	 * for each node that we touch during this stage.  (But we still
-	 * are excluding CPU-hotplug operations.)
+	 * Set the quiescent-state-needed bits in all the rcu_node
+	 * structures for all currently online CPUs in breadth-first
+	 * order, starting from the root rcu_node structure.  This
+	 * operation relies on the layout of the hierarchy within the
+	 * rsp->node[] array.  Note that other CPUs will access only
+	 * the leaves of the hierarchy, which still indicate that no
+	 * grace period is in progress, at least until the corresponding
+	 * leaf node has been initialized.  In addition, we have excluded
+	 * CPU-hotplug operations.
 	 *
 	 * Note that the grace period cannot complete until we finish
 	 * the initialization process, as there will be at least one
 	 * qsmask bit set in the root node until that time, namely the
-	 * one corresponding to this CPU.
+	 * one corresponding to this CPU, due to the fact that we have
+	 * irqs disabled.
 	 */
-	rnp_end = &rsp->node[NUM_RCU_NODES];
-	rnp_cur = rsp->level[NUM_RCU_LVLS - 1];
-	for (; rnp_cur < rnp_end; rnp_cur++) {
-		spin_lock(&rnp_cur->lock);	/* irqs already disabled. */
-		rnp_cur->qsmask = rnp_cur->qsmaskinit;
-		spin_unlock(&rnp_cur->lock);	/* irqs already disabled. */
+	for (rnp = &rsp->node[0]; rnp < &rsp->node[NUM_RCU_NODES]; rnp++) {
+		spin_lock(&rnp->lock);		/* irqs already disabled. */
+		rcu_preempt_check_blocked_tasks(rnp);
+		rnp->qsmask = rnp->qsmaskinit;
+		rnp->gpnum = rsp->gpnum;
+		spin_unlock(&rnp->lock);	/* irqs already disabled. */
 	}

 	rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */
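
The rewritten loop above relies on rsp->node[] storing the tree level by
level, so one linear scan initializes parents before any of their children.
A minimal user-space sketch of that breadth-first-by-layout property follows;
the two-level layout and all names (NUM_LEAVES, struct node) are invented
for illustration and are not the kernel's data structures.

#include <stdio.h>

/* Hypothetical layout mirroring rsp->node[]: the root first, then its
 * leaves, so array order is breadth-first by construction. */
#define NUM_LEAVES	4
#define NUM_NODES	(1 + NUM_LEAVES)	/* root + leaves */

struct node {
	int qsmask;		/* stand-in for rnp->qsmask */
	int qsmaskinit;		/* stand-in for rnp->qsmaskinit */
	long gpnum;		/* stand-in for rnp->gpnum */
};

int main(void)
{
	struct node node[NUM_NODES];
	long gpnum = 1;
	int i;

	for (i = 0; i < NUM_NODES; i++)
		node[i].qsmaskinit = 0xf;	/* pretend all CPUs are online */

	/*
	 * A single linear pass visits node[0] (the root) before any leaf,
	 * so a CPU that reads only its own leaf keeps seeing "no grace
	 * period in progress" until that leaf's turn comes.
	 */
	for (i = 0; i < NUM_NODES; i++) {
		node[i].qsmask = node[i].qsmaskinit;
		node[i].gpnum = gpnum;
		printf("initialized node %d (%s)\n", i, i ? "leaf" : "root");
	}
	return 0;
}
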
@@ -722,6 +705,7 @@ rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp)
 static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags)
 	__releases(rnp->lock)
 {
+	WARN_ON_ONCE(rsp->completed == rsp->gpnum);
 	rsp->completed = rsp->gpnum;
 	rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]);
 	rcu_start_gp(rsp, flags);  /* releases root node's rnp->lock. */
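
Both new sanity checks use WARN_ON_ONCE(), which reports only the first
violation so a recurring bug cannot flood the log. Here is a rough
user-space approximation of that one-shot idiom, built on a GCC/Clang
statement expression; MY_WARN_ON_ONCE is an invented stand-in, not the
kernel macro itself.

#include <stdio.h>

/* Warn the first time the condition is true, then stay quiet. */
#define MY_WARN_ON_ONCE(cond) ({				\
	static int warned_once;					\
	int cond_val = !!(cond);				\
	if (cond_val && !warned_once) {				\
		warned_once = 1;				\
		fprintf(stderr, "warning: \"%s\" is true\n", #cond); \
	}							\
	cond_val;						\
})

int main(void)
{
	long completed = 5, gpnum = 5;
	int i;

	for (i = 0; i < 3; i++)		/* warns on the first pass only */
		MY_WARN_ON_ONCE(completed == gpnum);
	return 0;
}
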
@@ -739,6 +723,8 @@ cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp,
 	      unsigned long flags)
 	__releases(rnp->lock)
 {
+	struct rcu_node *rnp_c;
+
 	/* Walk up the rcu_node hierarchy. */
 	for (;;) {
 		if (!(rnp->qsmask & mask)) {
@@ -762,8 +748,10 @@ cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp,
 			break;
 		}
 		spin_unlock_irqrestore(&rnp->lock, flags);
+		rnp_c = rnp;
 		rnp = rnp->parent;
 		spin_lock_irqsave(&rnp->lock, flags);
+		WARN_ON_ONCE(rnp_c->qsmask);
 	}

 	/*
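
The new rnp_c/WARN_ON_ONCE pair checks that a node's own qsmask is still
zero once its bit is being cleared in the parent, i.e. that nothing re-armed
the child while the child's lock was dropped and the parent's acquired.
Below is a single-threaded sketch of the upward reporting walk; it is not
kernel code, the names are invented, and the assert() stands in for the
warning (in this single-threaded toy it can never fire, whereas the kernel
check guards against concurrent updates between the two lock regions).

#include <assert.h>
#include <stdio.h>

struct node {
	struct node *parent;
	unsigned long qsmask;	/* bits for children/CPUs still pending */
	unsigned long grpmask;	/* this node's bit in parent->qsmask */
};

static void report_quiescent(struct node *rnp, unsigned long mask)
{
	for (;;) {
		rnp->qsmask &= ~mask;	/* clear this level's bit */
		if (rnp->qsmask != 0 || rnp->parent == NULL)
			break;		/* siblings still pending, or at root */
		mask = rnp->grpmask;	/* report this node upward */
		assert(rnp->qsmask == 0);	/* like WARN_ON_ONCE(rnp_c->qsmask) */
		rnp = rnp->parent;
	}
}

int main(void)
{
	struct node root = { NULL, 0x1, 0 };
	struct node leaf = { &root, 0x3, 0x1 };

	report_quiescent(&leaf, 0x1);	/* one CPU checks in: leaf still busy */
	report_quiescent(&leaf, 0x2);	/* last CPU: propagates to the root */
	printf("root.qsmask = %lx\n", root.qsmask);	/* prints 0 */
	return 0;
}
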
@@ -776,10 +764,10 @@ cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp,

 /*
  * Record a quiescent state for the specified CPU, which must either be
- * the current CPU or an offline CPU.  The lastcomp argument is used to
- * make sure we are still in the grace period of interest.  We don't want
- * to end the current grace period based on quiescent states detected in
- * an earlier grace period!
+ * the current CPU.  The lastcomp argument is used to make sure we are
+ * still in the grace period of interest.  We don't want to end the current
+ * grace period based on quiescent states detected in an earlier grace
+ * period!
  */
 static void
 cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp)
@@ -814,7 +802,6 @@ cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp)
 		 * This GP can't end until cpu checks in, so all of our
 		 * callbacks can be processed during the next GP.
 		 */
-		rdp = rsp->rda[smp_processor_id()];
 		rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];

 		cpu_quiet_msk(mask, rsp, rnp, flags); /* releases rnp->lock */
@@ -872,7 +859,7 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
 	spin_lock_irqsave(&rsp->onofflock, flags);

 	/* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */
-	rnp = rdp->mynode;
+	rnp = rdp->mynode;	/* this is the outgoing CPU's rnp. */
 	mask = rdp->grpmask;	/* rnp->grplo is constant. */
 	do {
 		spin_lock(&rnp->lock);		/* irqs already disabled. */
@@ -881,7 +868,7 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
 			spin_unlock(&rnp->lock); /* irqs remain disabled. */
 			break;
 		}
-		rcu_preempt_offline_tasks(rsp, rnp);
+		rcu_preempt_offline_tasks(rsp, rnp, rdp);
 		mask = rnp->grpmask;
 		spin_unlock(&rnp->lock);	/* irqs remain disabled. */
 		rnp = rnp->parent;
@@ -890,9 +877,6 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)

 	spin_unlock(&rsp->onofflock); /* irqs remain disabled. */

-	/* Being offline is a quiescent state, so go record it. */
-	cpu_quiet(cpu, rsp, rdp, lastcomp);
-
 	/*
 	 * Move callbacks from the outgoing CPU to the running CPU.
 	 * Note that the outgoing CPU is now quiescent, so it is now
@@ -1457,20 +1441,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
 		rnp = rnp->parent;
 	} while (rnp != NULL && !(rnp->qsmaskinit & mask));

-	spin_unlock(&rsp->onofflock);		/* irqs remain disabled. */
-
-	/*
-	 * A new grace period might start here.  If so, we will be part of
-	 * it, and its gpnum will be greater than ours, so we will
-	 * participate.  It is also possible for the gpnum to have been
-	 * incremented before this function was called, and the bitmasks
-	 * to not be filled out until now, in which case we will also
-	 * participate due to our gpnum being behind.
-	 */
-
-	/* Since it is coming online, the CPU is in a quiescent state. */
-	cpu_quiet(cpu, rsp, rdp, lastcomp);
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&rsp->onofflock, flags);
 }

 static void __cpuinit rcu_online_cpu(int cpu)