@@ -105,7 +105,7 @@ int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
  * The rcu_scheduler_active variable transitions from zero to one just
  * before the first task is spawned. So when this variable is zero, RCU
  * can assume that there is but one task, allowing RCU to (for example)
- * optimized synchronize_sched() to a simple barrier(). When this variable
+ * optimize synchronize_sched() to a simple barrier(). When this variable
  * is one, RCU must actually do all the hard work required to detect real
  * grace periods. This variable is also used to suppress boot-time false
  * positives from lockdep-RCU error checking.
@@ -217,12 +217,6 @@ module_param(blimit, long, 0444);
 module_param(qhimark, long, 0444);
 module_param(qlowmark, long, 0444);
 
-int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
-int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
-
-module_param(rcu_cpu_stall_suppress, int, 0644);
-module_param(rcu_cpu_stall_timeout, int, 0644);
-
 static ulong jiffies_till_first_fqs = RCU_JIFFIES_TILL_FORCE_QS;
 static ulong jiffies_till_next_fqs = RCU_JIFFIES_TILL_FORCE_QS;
 
@@ -305,17 +299,27 @@ cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
 }
 
 /*
- * Does the current CPU require a yet-as-unscheduled grace period?
+ * Does the current CPU require a not-yet-started grace period?
+ * The caller must have disabled interrupts to prevent races with
+ * normal callback registry.
  */
 static int
 cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
 {
-	struct rcu_head **ntp;
+	int i;
 
-	ntp = rdp->nxttail[RCU_DONE_TAIL +
-			   (ACCESS_ONCE(rsp->completed) != rdp->completed)];
-	return rdp->nxttail[RCU_DONE_TAIL] && ntp && *ntp &&
-	       !rcu_gp_in_progress(rsp);
+	if (rcu_gp_in_progress(rsp))
+		return 0;  /* No, a grace period is already in progress. */
+	if (!rdp->nxttail[RCU_NEXT_TAIL])
+		return 0;  /* No, this is a no-CBs (or offline) CPU. */
+	if (*rdp->nxttail[RCU_NEXT_READY_TAIL])
+		return 1;  /* Yes, this CPU has newly registered callbacks. */
+	for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++)
+		if (rdp->nxttail[i - 1] != rdp->nxttail[i] &&
+		    ULONG_CMP_LT(ACCESS_ONCE(rsp->completed),
+				 rdp->nxtcompleted[i]))
+			return 1;  /* Yes, CBs for future grace period. */
+	return 0; /* No grace period needed. */
 }
 
 /*
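[Note, not part of the patch: ULONG_CMP_LT() is the wraparound-safe ordering used throughout this file for grace-period numbers. The two macros below match their definitions in include/linux/rcupdate.h; the user-space demo wrapped around them is ours, for illustration only.]

#include <limits.h>
#include <stdio.h>

/* Modular comparisons of unsigned long counters, per rcupdate.h:
 * "a before b" holds when the unsigned difference a - b wraps into
 * the upper half of the unsigned long range. */
#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b)	(ULONG_MAX / 2 < (a) - (b))

int main(void)
{
	unsigned long completed = ULONG_MAX;		/* About to wrap. */
	unsigned long nxtcompleted = completed + 2;	/* Wraps to 1. */

	/* Naive "<" claims the future grace period is in the past... */
	printf("naive:   %d\n", nxtcompleted < completed);		/* 1 */
	/* ...but the modular comparison still orders it correctly. */
	printf("modular: %d\n", ULONG_CMP_LT(completed, nxtcompleted));	/* 1 */
	return 0;
}

[This is why the loop above can safely compare rsp->completed against the per-sublist ->nxtcompleted tags even across counter wrap.]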
@@ -336,7 +340,7 @@ static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
 static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
 				 bool user)
 {
-	trace_rcu_dyntick("Start", oldval, 0);
+	trace_rcu_dyntick("Start", oldval, rdtp->dynticks_nesting);
 	if (!user && !is_idle_task(current)) {
 		struct task_struct *idle = idle_task(smp_processor_id());
 
@@ -727,7 +731,7 @@ EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
  * interrupt from idle, return true. The caller must have at least
  * disabled preemption.
  */
-int rcu_is_cpu_rrupt_from_idle(void)
+static int rcu_is_cpu_rrupt_from_idle(void)
 {
 	return __get_cpu_var(rcu_dynticks).dynticks_nesting <= 1;
 }
@@ -793,28 +797,10 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 	return 0;
 }
 
-static int jiffies_till_stall_check(void)
-{
-	int till_stall_check = ACCESS_ONCE(rcu_cpu_stall_timeout);
-
-	/*
-	 * Limit check must be consistent with the Kconfig limits
-	 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
-	 */
-	if (till_stall_check < 3) {
-		ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
-		till_stall_check = 3;
-	} else if (till_stall_check > 300) {
-		ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
-		till_stall_check = 300;
-	}
-	return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
-}
-
 static void record_gp_stall_check_time(struct rcu_state *rsp)
 {
 	rsp->gp_start = jiffies;
-	rsp->jiffies_stall = jiffies + jiffies_till_stall_check();
+	rsp->jiffies_stall = jiffies + rcu_jiffies_till_stall_check();
 }
 
 /*
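[Note, not part of the patch: the clamping logic deleted above does not disappear; the new call site refers to rcu_jiffies_till_stall_check(), which this diff does not show. A sketch of what the relocated helper presumably looks like, reconstructed directly from the deleted body — its actual location and linkage are assumptions.]

/* Reconstructed sketch: the same clamp as the deleted
 * jiffies_till_stall_check(), now shared outside rcutree.c. */
int rcu_jiffies_till_stall_check(void)
{
	int till_stall_check = ACCESS_ONCE(rcu_cpu_stall_timeout);

	/* Limits must stay consistent with the Kconfig range
	 * for CONFIG_RCU_CPU_STALL_TIMEOUT. */
	if (till_stall_check < 3) {
		ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
		till_stall_check = 3;
	} else if (till_stall_check > 300) {
		ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
		till_stall_check = 300;
	}
	return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
}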
@@ -857,7 +843,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 		return;
 	}
-	rsp->jiffies_stall = jiffies + 3 * jiffies_till_stall_check() + 3;
+	rsp->jiffies_stall = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 
 	/*
@@ -935,7 +921,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
 	raw_spin_lock_irqsave(&rnp->lock, flags);
 	if (ULONG_CMP_GE(jiffies, rsp->jiffies_stall))
 		rsp->jiffies_stall = jiffies +
-				     3 * jiffies_till_stall_check() + 3;
+				     3 * rcu_jiffies_till_stall_check() + 3;
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 
 	set_need_resched();  /* kick ourselves to get things going. */
@@ -966,12 +952,6 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
 	}
 }
 
-static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
-{
-	rcu_cpu_stall_suppress = 1;
-	return NOTIFY_DONE;
-}
-
 /**
  * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
  *
@@ -989,15 +969,6 @@ void rcu_cpu_stall_reset(void)
 		rsp->jiffies_stall = jiffies + ULONG_MAX / 2;
 }
 
-static struct notifier_block rcu_panic_block = {
-	.notifier_call = rcu_panic,
-};
-
-static void __init check_cpu_stall_init(void)
-{
-	atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
-}
-
 /*
  * Update CPU-local rcu_data state to record the newly noticed grace period.
  * This is used both when we started the grace period and when we notice
@@ -1070,6 +1041,145 @@ static void init_callback_list(struct rcu_data *rdp)
 	init_nocb_callback_list(rdp);
 }
 
+/*
+ * Determine the value that ->completed will have at the end of the
+ * next subsequent grace period. This is used to tag callbacks so that
+ * a CPU can invoke callbacks in a timely fashion even if that CPU has
+ * been dyntick-idle for an extended period with callbacks under the
+ * influence of RCU_FAST_NO_HZ.
+ *
+ * The caller must hold rnp->lock with interrupts disabled.
+ */
+static unsigned long rcu_cbs_completed(struct rcu_state *rsp,
+				       struct rcu_node *rnp)
+{
+	/*
+	 * If RCU is idle, we just wait for the next grace period.
+	 * But we can only be sure that RCU is idle if we are looking
+	 * at the root rcu_node structure -- otherwise, a new grace
+	 * period might have started, but just not yet gotten around
+	 * to initializing the current non-root rcu_node structure.
+	 */
+	if (rcu_get_root(rsp) == rnp && rnp->gpnum == rnp->completed)
+		return rnp->completed + 1;
+
+	/*
+	 * Otherwise, wait for a possible partial grace period and
+	 * then the subsequent full grace period.
+	 */
+	return rnp->completed + 2;
+}
+
+/*
+ * If there is room, assign a ->completed number to any callbacks on
+ * this CPU that have not already been assigned. Also accelerate any
+ * callbacks that were previously assigned a ->completed number that has
+ * since proven to be too conservative, which can happen if callbacks get
+ * assigned a ->completed number while RCU is idle, but with reference to
+ * a non-root rcu_node structure. This function is idempotent, so it does
+ * not hurt to call it repeatedly.
+ *
+ * The caller must hold rnp->lock with interrupts disabled.
+ */
+static void rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
+			       struct rcu_data *rdp)
+{
+	unsigned long c;
+	int i;
+
+	/* If the CPU has no callbacks, nothing to do. */
+	if (!rdp->nxttail[RCU_NEXT_TAIL] || !*rdp->nxttail[RCU_DONE_TAIL])
+		return;
+
+	/*
+	 * Starting from the sublist containing the callbacks most
+	 * recently assigned a ->completed number and working down, find the
+	 * first sublist that is not assignable to an upcoming grace period.
+	 * Such a sublist has something in it (first two tests) and has
+	 * a ->completed number assigned that will complete sooner than
+	 * the ->completed number for newly arrived callbacks (last test).
+	 *
+	 * The key point is that any later sublist can be assigned the
+	 * same ->completed number as the newly arrived callbacks, which
+	 * means that the callbacks in any of these later sublists can be
+	 * grouped into a single sublist, whether or not they have already
+	 * been assigned a ->completed number.
+	 */
+	c = rcu_cbs_completed(rsp, rnp);
+	for (i = RCU_NEXT_TAIL - 1; i > RCU_DONE_TAIL; i--)
+		if (rdp->nxttail[i] != rdp->nxttail[i - 1] &&
+		    !ULONG_CMP_GE(rdp->nxtcompleted[i], c))
+			break;
+
+	/*
+	 * If there is no sublist for unassigned callbacks, leave.
+	 * At the same time, advance "i" one sublist, so that "i" will
+	 * index into the sublist into which all the remaining callbacks
+	 * should be grouped.
+	 */
+	if (++i >= RCU_NEXT_TAIL)
+		return;
+
+	/*
+	 * Assign all subsequent callbacks' ->completed number to the next
+	 * full grace period and group them all in the sublist initially
+	 * indexed by "i".
+	 */
+	for (; i <= RCU_NEXT_TAIL; i++) {
+		rdp->nxttail[i] = rdp->nxttail[RCU_NEXT_TAIL];
+		rdp->nxtcompleted[i] = c;
+	}
+
+	/* Trace depending on how much we were able to accelerate. */
+	if (!*rdp->nxttail[RCU_WAIT_TAIL])
+		trace_rcu_grace_period(rsp->name, rdp->gpnum, "AccWaitCB");
+	else
+		trace_rcu_grace_period(rsp->name, rdp->gpnum, "AccReadyCB");
+}
+
+/*
+ * Move any callbacks whose grace period has completed to the
+ * RCU_DONE_TAIL sublist, then compact the remaining sublists and
+ * assign ->completed numbers to any callbacks in the RCU_NEXT_TAIL
+ * sublist. This function is idempotent, so it does not hurt to
+ * invoke it repeatedly. As long as it is not invoked -too- often...
+ *
+ * The caller must hold rnp->lock with interrupts disabled.
+ */
+static void rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
+			    struct rcu_data *rdp)
+{
+	int i, j;
+
+	/* If the CPU has no callbacks, nothing to do. */
+	if (!rdp->nxttail[RCU_NEXT_TAIL] || !*rdp->nxttail[RCU_DONE_TAIL])
+		return;
+
+	/*
+	 * Find all callbacks whose ->completed numbers indicate that they
+	 * are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
+	 */
+	for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++) {
+		if (ULONG_CMP_LT(rnp->completed, rdp->nxtcompleted[i]))
+			break;
+		rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[i];
+	}
+	/* Clean up any sublist tail pointers that were misordered above. */
+	for (j = RCU_WAIT_TAIL; j < i; j++)
+		rdp->nxttail[j] = rdp->nxttail[RCU_DONE_TAIL];
+
+	/* Copy down callbacks to fill in empty sublists. */
+	for (j = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++, j++) {
+		if (rdp->nxttail[j] == rdp->nxttail[RCU_NEXT_TAIL])
+			break;
+		rdp->nxttail[j] = rdp->nxttail[i];
+		rdp->nxtcompleted[j] = rdp->nxtcompleted[i];
+	}
+
+	/* Classify any remaining callbacks. */
+	rcu_accelerate_cbs(rsp, rnp, rdp);
+}
+
 /*
  * Advance this CPU's callbacks, but only if the current grace period
  * has ended. This may be called only from the CPU to whom the rdp
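[Note, not part of the patch: to see how the tagging and the tail-pointer dance fit together, recall that all of a CPU's callbacks sit on one singly linked list, and nxttail[i] points to the ->next field terminating sublist i, so an empty sublist shares its tail pointer with its predecessor. rcu_cbs_completed() picks the tag: rnp->completed + 1 when RCU is idle and rnp is the root, otherwise + 2 to allow for a partial grace period already underway. The user-space toy below is ours and heavily simplified — no wraparound handling, and "acceleration" always collapses everything into the RCU_NEXT_READY_TAIL sublist — but it exercises exactly that bookkeeping.]

#include <stdio.h>

/* Toy model of rdp->nxttail[] / ->nxtcompleted[] (not kernel code). */
enum { DONE_TAIL, WAIT_TAIL, NEXT_READY_TAIL, NEXT_TAIL, NSEG };

struct cb { struct cb *next; int id; };

static struct cb *head;
static struct cb **tail[NSEG];
static unsigned long tag[NSEG];		/* Models ->nxtcompleted[]. */

static void init(void)
{
	for (int i = 0; i < NSEG; i++)
		tail[i] = &head;
}

static void enqueue(struct cb *cb)	/* New callbacks land in NEXT. */
{
	cb->next = NULL;
	*tail[NEXT_TAIL] = cb;
	tail[NEXT_TAIL] = &cb->next;
}

static void accelerate(unsigned long c)	/* Crude rcu_accelerate_cbs(). */
{
	/* Group every untagged callback into NEXT_READY, tagged c. */
	tail[NEXT_READY_TAIL] = tail[NEXT_TAIL];
	tag[NEXT_READY_TAIL] = c;
}

static void advance(unsigned long completed)	/* Crude rcu_advance_cbs(). */
{
	int i, j;

	/* Sublists whose tag has been reached become DONE... */
	for (i = WAIT_TAIL; i < NEXT_TAIL; i++) {
		if (completed < tag[i])
			break;
		tail[DONE_TAIL] = tail[i];
	}
	/* ...then repair the now-misordered earlier tail pointers. */
	for (j = WAIT_TAIL; j < i; j++)
		tail[j] = tail[DONE_TAIL];
}

static int count(int seg)	/* Length of sublist seg. */
{
	struct cb **p = seg == DONE_TAIL ? &head : tail[seg - 1];
	int n = 0;

	for (; p != tail[seg]; p = &(*p)->next)
		n++;
	return n;
}

int main(void)
{
	struct cb cbs[5];
	int i;

	init();
	for (i = 0; i < 3; i++)
		enqueue(&cbs[i]);
	accelerate(43);			/* Tag for GP 41 + 2 (non-root case). */
	for (i = 3; i < 5; i++)
		enqueue(&cbs[i]);	/* Later arrivals, still untagged. */

	advance(43);			/* Grace period 43 has now completed. */
	printf("done=%d wait=%d ready=%d next=%d\n",
	       count(DONE_TAIL), count(WAIT_TAIL),
	       count(NEXT_READY_TAIL), count(NEXT_TAIL));
	return 0;
}

[Compiled and run, this prints done=3 wait=0 ready=0 next=2: the three callbacks tagged for grace period 43 became invokable once the completed counter reached 43, while the two later arrivals still await a tag.]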
@@ -1080,12 +1190,15 @@ static void
 __rcu_process_gp_end(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
 {
 	/* Did another grace period end? */
-	if (rdp->completed != rnp->completed) {
+	if (rdp->completed == rnp->completed) {
 
-		/* Advance callbacks. No harm if list empty. */
-		rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[RCU_WAIT_TAIL];
-		rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_READY_TAIL];
-		rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
+		/* No, so just accelerate recent callbacks. */
+		rcu_accelerate_cbs(rsp, rnp, rdp);
+
+	} else {
+
+		/* Advance callbacks. */
+		rcu_advance_cbs(rsp, rnp, rdp);
 
 		/* Remember that we saw this grace-period completion. */
 		rdp->completed = rnp->completed;
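[Note, not part of the patch, with grace-period numbers invented for illustration: if rnp->completed has moved on to 42 while rdp->completed still reads 41, a grace period ended without this CPU noticing, so rcu_advance_cbs() drains every sublist tagged 42 or earlier into RCU_DONE_TAIL before rdp->completed is brought up to date. If both already read 42, nothing ended, and rcu_accelerate_cbs() merely makes sure recently queued callbacks carry a grace-period tag.]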
@@ -1392,17 +1505,10 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
 		/*
 		 * Because there is no grace period in progress right now,
 		 * any callbacks we have up to this point will be satisfied
-		 * by the next grace period. So promote all callbacks to be
-		 * handled after the end of the next grace period. If the
-		 * CPU is not yet aware of the end of the previous grace period,
-		 * we need to allow for the callback advancement that will
-		 * occur when it does become aware. Deadlock prevents us from
-		 * making it aware at this point: We cannot acquire a leaf
-		 * rcu_node ->lock while holding the root rcu_node ->lock.
+		 * by the next grace period. So this is a good place to
+		 * assign a grace period number to recently posted callbacks.
 		 */
-		rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
-		if (rdp->completed == rsp->completed)
-			rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
+		rcu_accelerate_cbs(rsp, rnp, rdp);
 
 	rsp->gp_flags = RCU_GP_FLAG_INIT;
 	raw_spin_unlock(&rnp->lock); /* Interrupts remain disabled. */
@@ -1527,7 +1633,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
 		 * This GP can't end until cpu checks in, so all of our
 		 * callbacks can be processed during the next GP.
 		 */
-		rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
+		rcu_accelerate_cbs(rsp, rnp, rdp);
 
 		rcu_report_qs_rnp(mask, rsp, rnp, flags); /* rlses rnp->lock */
 	}
@@ -1779,7 +1885,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 	long bl, count, count_lazy;
 	int i;
 
-	/* If no callbacks are ready, just return.*/
+	/* If no callbacks are ready, just return. */
 	if (!cpu_has_callbacks_ready_to_invoke(rdp)) {
 		trace_rcu_batch_start(rsp->name, rdp->qlen_lazy, rdp->qlen, 0);
 		trace_rcu_batch_end(rsp->name, 0, !!ACCESS_ONCE(rdp->nxtlist),
@@ -2008,19 +2114,19 @@ __rcu_process_callbacks(struct rcu_state *rsp)
 
 	WARN_ON_ONCE(rdp->beenonline == 0);
 
-	/*
-	 * Advance callbacks in response to end of earlier grace
-	 * period that some other CPU ended.
-	 */
+	/* Handle the end of a grace period that some other CPU ended. */
 	rcu_process_gp_end(rsp, rdp);
 
 	/* Update RCU state based on any recent quiescent states. */
 	rcu_check_quiescent_state(rsp, rdp);
 
 	/* Does this CPU require a not-yet-started grace period? */
+	local_irq_save(flags);
 	if (cpu_needs_another_gp(rsp, rdp)) {
-		raw_spin_lock_irqsave(&rcu_get_root(rsp)->lock, flags);
+		raw_spin_lock(&rcu_get_root(rsp)->lock); /* irqs disabled. */
 		rcu_start_gp(rsp, flags);  /* releases above lock */
+	} else {
+		local_irq_restore(flags);
 	}
 
 	/* If there are callbacks ready, invoke them. */
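[Note, not part of the patch: the locking change above exists because cpu_needs_another_gp() now inspects the ->nxtcompleted[] tags and must therefore itself run with interrupts disabled, per the comment added to it earlier in this patch. The shape of the resulting pattern, as a schematic with hypothetical helper names (needs_work(), do_work_unlock()):]

	unsigned long flags;

	local_irq_save(flags);			/* The test itself needs irqs off. */
	if (needs_work()) {
		raw_spin_lock(&lock);		/* irqs already disabled. */
		do_work_unlock(flags);		/* Drops lock, restores flags. */
	} else {
		local_irq_restore(flags);	/* Nothing to do; undo irq-off. */
	}

[The earlier raw_spin_lock_irqsave() disabled interrupts only after the test had already run, leaving cpu_needs_another_gp() to race with callback registration from interrupt handlers.]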
@@ -2719,9 +2825,6 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
 	rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
 	WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
 	WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
-#ifdef CONFIG_RCU_USER_QS
-	WARN_ON_ONCE(rdp->dynticks->in_user);
-#endif
 	rdp->cpu = cpu;
 	rdp->rsp = rsp;
 	rcu_boot_init_nocb_percpu_data(rdp);
@@ -2938,6 +3041,10 @@ static void __init rcu_init_one(struct rcu_state *rsp,
 
 	BUILD_BUG_ON(MAX_RCU_LVLS > ARRAY_SIZE(buf));  /* Fix buf[] init! */
 
+	/* Silence gcc 4.8 warning about array index out of range. */
+	if (rcu_num_lvls > RCU_NUM_LVLS)
+		panic("rcu_init_one: rcu_num_lvls overflow");
+
 	/* Initialize the level-tracking arrays. */
 
 	for (i = 0; i < rcu_num_lvls; i++)
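[Note, not part of the patch: the panic() guard added above is the usual way to satisfy an over-eager compiler bounds analysis — by making the out-of-range case provably dead, gcc 4.8 can see that every later rcu_num_lvls-bounded index stays within the RCU_NUM_LVLS-sized arrays. Schematically, with hypothetical names:]

	int levelcnt[MAX_LVLS];			/* Compile-time bound. */

	if (runtime_lvls > MAX_LVLS)		/* Overflow case provably dead... */
		panic("levelcnt overflow");
	for (i = 0; i < runtime_lvls; i++)	/* ...so this index is in range. */
		levelcnt[i] = 0;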
@@ -3074,7 +3181,6 @@ void __init rcu_init(void)
 	cpu_notifier(rcu_cpu_notify, 0);
 	for_each_online_cpu(cpu)
 		rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
-	check_cpu_stall_init();
 }
 
 #include "rcutree_plugin.h"