@@ -359,7 +359,7 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
 		struct task_struct *idle = idle_task(smp_processor_id());

 		trace_rcu_dyntick("Error on entry: not idle task", oldval, 0);
-		ftrace_dump(DUMP_ALL);
+		ftrace_dump(DUMP_ORIG);
 		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
 			  current->pid, current->comm,
 			  idle->pid, idle->comm); /* must be idle task! */
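A note on the change above: DUMP_ORIG limits the ftrace dump to the ring buffer of the CPU that called ftrace_dump(), rather than dumping every CPU's buffer, which keeps the console output manageable when the not-idle-task check fires. For reference, the dump modes are declared in include/linux/kernel.h roughly as follows (reproduced from memory, so treat it as a sketch):

    enum ftrace_dump_mode {
        DUMP_NONE,  /* do not dump any trace buffers */
        DUMP_ALL,   /* dump the trace buffers of all CPUs */
        DUMP_ORIG,  /* dump only the buffer of the CPU that triggered the dump */
    };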
@@ -469,7 +469,7 @@ static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)

 		trace_rcu_dyntick("Error on exit: not idle task",
 				  oldval, rdtp->dynticks_nesting);
-		ftrace_dump(DUMP_ALL);
+		ftrace_dump(DUMP_ORIG);
 		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
 			  current->pid, current->comm,
 			  idle->pid, idle->comm); /* must be idle task! */
@@ -586,8 +586,6 @@ void rcu_nmi_exit(void)
 	WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
 }

-#ifdef CONFIG_PROVE_RCU
-
 /**
  * rcu_is_cpu_idle - see if RCU thinks that the current CPU is idle
  *
@@ -605,7 +603,7 @@ int rcu_is_cpu_idle(void)
 }
 EXPORT_SYMBOL(rcu_is_cpu_idle);

-#ifdef CONFIG_HOTPLUG_CPU
+#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)

 /*
  * Is the current CPU online? Disable preemption to avoid false positives
@@ -646,9 +644,7 @@ bool rcu_lockdep_current_cpu_online(void)
 }
 EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);

-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
-
-#endif /* #ifdef CONFIG_PROVE_RCU */
+#endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */

 /**
  * rcu_is_cpu_rrupt_from_idle - see if idle or immediately interrupted from idle
@@ -734,7 +730,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
 	int cpu;
 	long delta;
 	unsigned long flags;
-	int ndetected;
+	int ndetected = 0;
 	struct rcu_node *rnp = rcu_get_root(rsp);

 	/* Only let one CPU complain about others per time interval. */
@@ -775,7 +771,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
 	 */
 	rnp = rcu_get_root(rsp);
 	raw_spin_lock_irqsave(&rnp->lock, flags);
-	ndetected = rcu_print_task_stall(rnp);
+	ndetected += rcu_print_task_stall(rnp);
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);

 	print_cpu_stall_info_end();
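Initializing ndetected to zero (previous hunk) and accumulating with "+=" here matters because rcu_print_task_stall() has already been invoked once per leaf rcu_node earlier in print_other_cpu_stall(); a plain assignment at the root would discard those counts and could make a real stall be reported as one that ended before the state dump. The earlier loop has roughly this shape (an abridged sketch from memory, not the exact source):

    rcu_for_each_leaf_node(rsp, rnp) {
        raw_spin_lock_irqsave(&rnp->lock, flags);
        ndetected += rcu_print_task_stall(rnp);
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
    }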
@@ -938,6 +934,18 @@ check_for_new_grace_period(struct rcu_state *rsp, struct rcu_data *rdp)
 	return ret;
 }

+/*
+ * Initialize the specified rcu_data structure's callback list to empty.
+ */
+static void init_callback_list(struct rcu_data *rdp)
+{
+	int i;
+
+	rdp->nxtlist = NULL;
+	for (i = 0; i < RCU_NEXT_SIZE; i++)
+		rdp->nxttail[i] = &rdp->nxtlist;
+}
+
 /*
  * Advance this CPU's callbacks, but only if the current grace period
  * has ended. This may be called only from the CPU to whom the rdp
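The new init_callback_list() helper resets the segmented callback list that rcu_data carries: ->nxtlist is a singly linked list of rcu_head structures, and ->nxttail[] holds RCU_NEXT_SIZE tail pointers that partition it into done/waiting/next-ready/next segments. When the list is empty, every tail pointer refers back to &rdp->nxtlist. The following standalone userspace model (hypothetical names, not kernel code) illustrates the empty-list invariant and how an append works:

    #include <stdio.h>
    #include <stddef.h>

    struct cb { struct cb *next; };

    /* Segment indexes mirror RCU_DONE_TAIL .. RCU_NEXT_TAIL in rcutree.h. */
    enum { SEG_DONE, SEG_WAIT, SEG_NEXT_READY, SEG_NEXT, SEG_COUNT };

    struct cb_list {
        struct cb *nxtlist;             /* head of the single linked list */
        struct cb **nxttail[SEG_COUNT]; /* tail pointer of each segment */
    };

    static void init_callback_list(struct cb_list *l)
    {
        int i;

        l->nxtlist = NULL;
        for (i = 0; i < SEG_COUNT; i++)
            l->nxttail[i] = &l->nxtlist; /* empty: all segments end at the head */
    }

    static void enqueue(struct cb_list *l, struct cb *c)
    {
        c->next = NULL;
        *l->nxttail[SEG_NEXT] = c;       /* append to the "next" segment */
        l->nxttail[SEG_NEXT] = &c->next;
    }

    int main(void)
    {
        struct cb_list l;
        struct cb a, b;

        init_callback_list(&l);
        enqueue(&l, &a);
        enqueue(&l, &b);
        printf("empty after init+enqueue? %s\n", l.nxtlist ? "no" : "yes");
        return 0;
    }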
@@ -1330,8 +1338,6 @@ static void
 rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
 			  struct rcu_node *rnp, struct rcu_data *rdp)
 {
-	int i;
-
 	/*
 	 * Orphan the callbacks. First adjust the counts. This is safe
 	 * because ->onofflock excludes _rcu_barrier()'s adoption of
@@ -1342,7 +1348,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
 	rsp->qlen += rdp->qlen;
 	rdp->n_cbs_orphaned += rdp->qlen;
 	rdp->qlen_lazy = 0;
-	rdp->qlen = 0;
+	ACCESS_ONCE(rdp->qlen) = 0;
 }

 /*
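The ACCESS_ONCE() annotations in this hunk and the later ones exist presumably because ->qlen is now also sampled outside the usual locking (diagnostics and the checks added in this series read it locklessly), which is the usual reason for such annotations: the compiler must be told to emit exactly one load or store that it may neither elide nor refetch. In kernels of this vintage ACCESS_ONCE() is essentially a volatile cast; the small userspace demo below uses an equivalent macro (the kernel's definition lives in include/linux/compiler.h and is believed to match this form):

    #include <stdio.h>

    /* Equivalent of the kernel's ACCESS_ONCE(): force a single volatile access. */
    #define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))

    static long qlen;

    int main(void)
    {
        long snap;

        ACCESS_ONCE(qlen) = 0;      /* one volatile store the compiler cannot elide or repeat */
        snap = ACCESS_ONCE(qlen);   /* one volatile load, never refetched */
        printf("qlen snapshot = %ld\n", snap);
        return 0;
    }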
@@ -1371,9 +1377,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
 	}

 	/* Finally, initialize the rcu_data structure's list to empty. */
-	rdp->nxtlist = NULL;
-	for (i = 0; i < RCU_NEXT_SIZE; i++)
-		rdp->nxttail[i] = &rdp->nxtlist;
+	init_callback_list(rdp);
 }

 /*
@@ -1507,6 +1511,9 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	if (need_report & RCU_OFL_TASKS_EXP_GP)
 		rcu_report_exp_rnp(rsp, rnp, true);
+	WARN_ONCE(rdp->qlen != 0 || rdp->nxtlist != NULL,
+		  "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, nxtlist=%p\n",
+		  cpu, rdp->qlen, rdp->nxtlist);
 }

 #else /* #ifdef CONFIG_HOTPLUG_CPU */
@@ -1594,7 +1601,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 	}
 	smp_mb(); /* List handling before counting for rcu_barrier(). */
 	rdp->qlen_lazy -= count_lazy;
-	rdp->qlen -= count;
+	ACCESS_ONCE(rdp->qlen) -= count;
 	rdp->n_cbs_invoked += count;

 	/* Reinstate batch limit if we have worked down the excess. */
@@ -1607,6 +1614,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 		rdp->n_force_qs_snap = rsp->n_force_qs;
 	} else if (rdp->qlen < rdp->qlen_last_fqs_check - qhimark)
 		rdp->qlen_last_fqs_check = rdp->qlen;
+	WARN_ON_ONCE((rdp->nxtlist == NULL) != (rdp->qlen == 0));

 	local_irq_restore(flags);

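The WARN_ON_ONCE() added here encodes the invariant that the ->qlen changes in this series rely on: ->nxtlist is NULL exactly when ->qlen is zero. Comparing the two conditions with "!=" acts as an exclusive-or, so the warning fires only when they disagree. A minimal illustration with hypothetical values (not kernel code):

    #include <assert.h>
    #include <stddef.h>

    int main(void)
    {
        void *nxtlist = NULL;
        long qlen = 0;

        assert(((nxtlist == NULL) != (qlen == 0)) == 0); /* consistent: no warning */

        qlen = 3;                                        /* count says non-empty, list says empty */
        assert(((nxtlist == NULL) != (qlen == 0)) == 1); /* inconsistent: would WARN */
        return 0;
    }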
@@ -1858,6 +1866,56 @@ static void invoke_rcu_core(void)
 	raise_softirq(RCU_SOFTIRQ);
 }

+/*
+ * Handle any core-RCU processing required by a call_rcu() invocation.
+ */
+static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
+			    struct rcu_head *head, unsigned long flags)
+{
+	/*
+	 * If called from an extended quiescent state, invoke the RCU
+	 * core in order to force a re-evaluation of RCU's idleness.
+	 */
+	if (rcu_is_cpu_idle() && cpu_online(smp_processor_id()))
+		invoke_rcu_core();
+
+	/* If interrupts were disabled or CPU offline, don't invoke RCU core. */
+	if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
+		return;
+
+	/*
+	 * Force the grace period if too many callbacks or too long waiting.
+	 * Enforce hysteresis, and don't invoke force_quiescent_state()
+	 * if some other CPU has recently done so. Also, don't bother
+	 * invoking force_quiescent_state() if the newly enqueued callback
+	 * is the only one waiting for a grace period to complete.
+	 */
+	if (unlikely(rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
+
+		/* Are we ignoring a completed grace period? */
+		rcu_process_gp_end(rsp, rdp);
+		check_for_new_grace_period(rsp, rdp);
+
+		/* Start a new grace period if one not already started. */
+		if (!rcu_gp_in_progress(rsp)) {
+			unsigned long nestflag;
+			struct rcu_node *rnp_root = rcu_get_root(rsp);
+
+			raw_spin_lock_irqsave(&rnp_root->lock, nestflag);
+			rcu_start_gp(rsp, nestflag);  /* releases rnp_root->lock */
+		} else {
+			/* Give the grace period a kick. */
+			rdp->blimit = LONG_MAX;
+			if (rsp->n_force_qs == rdp->n_force_qs_snap &&
+			    *rdp->nxttail[RCU_DONE_TAIL] != head)
+				force_quiescent_state(rsp, 0);
+			rdp->n_force_qs_snap = rsp->n_force_qs;
+			rdp->qlen_last_fqs_check = rdp->qlen;
+		}
+	} else if (ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies))
+		force_quiescent_state(rsp, 1);
+}
+
 static void
 __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 	   struct rcu_state *rsp, bool lazy)
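One detail of the helper above worth spelling out: the final else-if compares jiffies against ->jiffies_force_qs with ULONG_CMP_LT(), the wraparound-safe way to order unsigned counters. Its definition in include/linux/rcupdate.h is believed to be the one-liner below; the tiny userspace check shows why it still gives the intended answer across a counter wrap:

    #include <limits.h>
    #include <stdio.h>

    /* Wraparound-safe "a < b" for unsigned long counters such as jiffies. */
    #define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))

    int main(void)
    {
        unsigned long a = ULONG_MAX - 1;        /* just before the wrap */
        unsigned long b = a + 10;               /* wraps around to 8 */

        printf("%d\n", ULONG_CMP_LT(a, b));     /* 1: a is ordered before b */
        printf("%d\n", ULONG_CMP_LT(5UL, 3UL)); /* 0: 5 is not before 3 */
        return 0;
    }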
@@ -1882,7 +1940,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 	rdp = this_cpu_ptr(rsp->rda);

 	/* Add the callback to our list. */
-	rdp->qlen++;
+	ACCESS_ONCE(rdp->qlen)++;
 	if (lazy)
 		rdp->qlen_lazy++;
 	else
@@ -1897,43 +1955,8 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 	else
 		trace_rcu_callback(rsp->name, head, rdp->qlen_lazy, rdp->qlen);

-	/* If interrupts were disabled, don't dive into RCU core. */
-	if (irqs_disabled_flags(flags)) {
-		local_irq_restore(flags);
-		return;
-	}
-
-	/*
-	 * Force the grace period if too many callbacks or too long waiting.
-	 * Enforce hysteresis, and don't invoke force_quiescent_state()
-	 * if some other CPU has recently done so. Also, don't bother
-	 * invoking force_quiescent_state() if the newly enqueued callback
-	 * is the only one waiting for a grace period to complete.
-	 */
-	if (unlikely(rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
-
-		/* Are we ignoring a completed grace period? */
-		rcu_process_gp_end(rsp, rdp);
-		check_for_new_grace_period(rsp, rdp);
-
-		/* Start a new grace period if one not already started. */
-		if (!rcu_gp_in_progress(rsp)) {
-			unsigned long nestflag;
-			struct rcu_node *rnp_root = rcu_get_root(rsp);
-
-			raw_spin_lock_irqsave(&rnp_root->lock, nestflag);
-			rcu_start_gp(rsp, nestflag);  /* releases rnp_root->lock */
-		} else {
-			/* Give the grace period a kick. */
-			rdp->blimit = LONG_MAX;
-			if (rsp->n_force_qs == rdp->n_force_qs_snap &&
-			    *rdp->nxttail[RCU_DONE_TAIL] != head)
-				force_quiescent_state(rsp, 0);
-			rdp->n_force_qs_snap = rsp->n_force_qs;
-			rdp->qlen_last_fqs_check = rdp->qlen;
-		}
-	} else if (ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies))
-		force_quiescent_state(rsp, 1);
+	/* Go handle any RCU core processing required. */
+	__call_rcu_core(rsp, rdp, head, flags);
 	local_irq_restore(flags);
 }
@@ -1963,28 +1986,16 @@ EXPORT_SYMBOL_GPL(call_rcu_bh);
  * occasionally incorrectly indicate that there are multiple CPUs online
  * when there was in fact only one the whole time, as this just adds
  * some overhead: RCU still operates correctly.
- *
- * Of course, sampling num_online_cpus() with preemption enabled can
- * give erroneous results if there are concurrent CPU-hotplug operations.
- * For example, given a demonic sequence of preemptions in num_online_cpus()
- * and CPU-hotplug operations, there could be two or more CPUs online at
- * all times, but num_online_cpus() might well return one (or even zero).
- *
- * However, all such demonic sequences require at least one CPU-offline
- * operation. Furthermore, rcu_blocking_is_gp() giving the wrong answer
- * is only a problem if there is an RCU read-side critical section executing
- * throughout. But RCU-sched and RCU-bh read-side critical sections
- * disable either preemption or bh, which prevents a CPU from going offline.
- * Therefore, the only way that rcu_blocking_is_gp() can incorrectly return
- * that there is only one CPU when in fact there was more than one throughout
- * is when there were no RCU readers in the system. If there are no
- * RCU readers, the grace period by definition can be of zero length,
- * regardless of the number of online CPUs.
  */
 static inline int rcu_blocking_is_gp(void)
 {
+	int ret;
+
 	might_sleep(); /* Check for RCU read-side critical section. */
-	return num_online_cpus() <= 1;
+	preempt_disable();
+	ret = num_online_cpus() <= 1;
+	preempt_enable();
+	return ret;
 }

 /**
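The long "demonic sequence" comment is dropped because its argument is no longer needed: num_online_cpus() is now sampled with preemption disabled, presumably because that keeps a concurrent CPU-hotplug operation from completing while the count is read. The helper remains a fast path for the blocking grace-period primitives, whose callers look roughly like this (a sketch from memory, not the exact source):

    /* Sketch of synchronize_sched(): if only one CPU is online, there can be
     * no concurrent RCU-sched readers, so the grace period is a no-op. */
    void synchronize_sched(void)
    {
        if (rcu_blocking_is_gp())
            return;                     /* single CPU: nothing to wait for */
        wait_rcu_gp(call_rcu_sched);    /* otherwise wait out a full grace period */
    }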
@@ -2473,18 +2484,15 @@ static void __init
 rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
 {
 	unsigned long flags;
-	int i;
 	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
 	struct rcu_node *rnp = rcu_get_root(rsp);

 	/* Set up local state, ensuring consistent view of global state. */
 	raw_spin_lock_irqsave(&rnp->lock, flags);
 	rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
-	rdp->nxtlist = NULL;
-	for (i = 0; i < RCU_NEXT_SIZE; i++)
-		rdp->nxttail[i] = &rdp->nxtlist;
+	init_callback_list(rdp);
 	rdp->qlen_lazy = 0;
-	rdp->qlen = 0;
+	ACCESS_ONCE(rdp->qlen) = 0;
 	rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
 	WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
 	WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);