@@ -61,13 +61,6 @@
 
 #include "rcu.h"
 
-/*
- * Strings used in tracepoints need to be exported via the
- * tracing system such that tools like perf and trace-cmd can
- * translate the string address pointers to actual text.
- */
-#define TPS(x) tracepoint_string(x)
-
 /* Data structures. */
 
 static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
@@ -222,7 +215,7 @@ void rcu_note_context_switch(int cpu)
 }
 EXPORT_SYMBOL_GPL(rcu_note_context_switch);
 
-DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
+static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
         .dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
         .dynticks = ATOMIC_INIT(1),
 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
@@ -371,7 +364,8 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
 {
         trace_rcu_dyntick(TPS("Start"), oldval, rdtp->dynticks_nesting);
         if (!user && !is_idle_task(current)) {
-                struct task_struct *idle = idle_task(smp_processor_id());
+                struct task_struct *idle __maybe_unused =
+                        idle_task(smp_processor_id());
 
                 trace_rcu_dyntick(TPS("Error on entry: not idle task"), oldval, 0);
                 ftrace_dump(DUMP_ORIG);
@@ -407,7 +401,7 @@ static void rcu_eqs_enter(bool user)
         long long oldval;
         struct rcu_dynticks *rdtp;
 
-        rdtp = &__get_cpu_var(rcu_dynticks);
+        rdtp = this_cpu_ptr(&rcu_dynticks);
         oldval = rdtp->dynticks_nesting;
         WARN_ON_ONCE((oldval & DYNTICK_TASK_NEST_MASK) == 0);
         if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE)
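
This hunk and most of those below convert &__get_cpu_var(...) to this_cpu_ptr(&...), and one later hunk uses __this_cpu_read() for a plain load; __get_cpu_var() was on its way out of the kernel. A minimal kernel-style sketch of the equivalent forms (the per-CPU variable name is illustrative; the accessors are the real APIs):

/*
 * Sketch only: callers must already be non-preemptible (as the
 * functions in these hunks are) for the returned pointer to stay
 * meaningful.
 */
static DEFINE_PER_CPU(struct rcu_dynticks, example_dynticks);

static struct rcu_dynticks *example_this_cpu_rdtp(void)
{
        /* Old form, being removed: return &__get_cpu_var(example_dynticks); */
        return this_cpu_ptr(&example_dynticks);
}

static long long example_this_cpu_nesting(void)
{
        /* For a single load, __this_cpu_read() avoids forming the pointer. */
        return __this_cpu_read(example_dynticks.dynticks_nesting);
}
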
@@ -435,7 +429,7 @@ void rcu_idle_enter(void)
 
         local_irq_save(flags);
         rcu_eqs_enter(false);
-        rcu_sysidle_enter(&__get_cpu_var(rcu_dynticks), 0);
+        rcu_sysidle_enter(this_cpu_ptr(&rcu_dynticks), 0);
         local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(rcu_idle_enter);
@@ -478,7 +472,7 @@ void rcu_irq_exit(void)
         struct rcu_dynticks *rdtp;
 
         local_irq_save(flags);
-        rdtp = &__get_cpu_var(rcu_dynticks);
+        rdtp = this_cpu_ptr(&rcu_dynticks);
         oldval = rdtp->dynticks_nesting;
         rdtp->dynticks_nesting--;
         WARN_ON_ONCE(rdtp->dynticks_nesting < 0);
@@ -508,7 +502,8 @@ static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
         rcu_cleanup_after_idle(smp_processor_id());
         trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
         if (!user && !is_idle_task(current)) {
-                struct task_struct *idle = idle_task(smp_processor_id());
+                struct task_struct *idle __maybe_unused =
+                        idle_task(smp_processor_id());
 
                 trace_rcu_dyntick(TPS("Error on exit: not idle task"),
                                   oldval, rdtp->dynticks_nesting);
@@ -528,7 +523,7 @@ static void rcu_eqs_exit(bool user)
         struct rcu_dynticks *rdtp;
         long long oldval;
 
-        rdtp = &__get_cpu_var(rcu_dynticks);
+        rdtp = this_cpu_ptr(&rcu_dynticks);
         oldval = rdtp->dynticks_nesting;
         WARN_ON_ONCE(oldval < 0);
         if (oldval & DYNTICK_TASK_NEST_MASK)
@@ -555,7 +550,7 @@ void rcu_idle_exit(void)
 
         local_irq_save(flags);
         rcu_eqs_exit(false);
-        rcu_sysidle_exit(&__get_cpu_var(rcu_dynticks), 0);
+        rcu_sysidle_exit(this_cpu_ptr(&rcu_dynticks), 0);
         local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(rcu_idle_exit);
@@ -599,7 +594,7 @@ void rcu_irq_enter(void)
         long long oldval;
 
         local_irq_save(flags);
-        rdtp = &__get_cpu_var(rcu_dynticks);
+        rdtp = this_cpu_ptr(&rcu_dynticks);
         oldval = rdtp->dynticks_nesting;
         rdtp->dynticks_nesting++;
         WARN_ON_ONCE(rdtp->dynticks_nesting == 0);
@@ -620,7 +615,7 @@ void rcu_irq_enter(void)
  */
 void rcu_nmi_enter(void)
 {
-        struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
+        struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 
         if (rdtp->dynticks_nmi_nesting == 0 &&
             (atomic_read(&rdtp->dynticks) & 0x1))
@@ -642,7 +637,7 @@ void rcu_nmi_enter(void)
  */
 void rcu_nmi_exit(void)
 {
-        struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
+        struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 
         if (rdtp->dynticks_nmi_nesting == 0 ||
             --rdtp->dynticks_nmi_nesting != 0)
@@ -665,7 +660,7 @@ int rcu_is_cpu_idle(void)
         int ret;
 
         preempt_disable();
-        ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
+        ret = (atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1) == 0;
         preempt_enable();
         return ret;
 }
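
Unchanged by this hunk, which only swaps the per-CPU accessor, but worth keeping in mind when reading it: ->dynticks is incremented on every transition into and out of an extended quiescent state, so an even value means the CPU is idle as far as RCU is concerned. A tiny illustrative helper for the parity test:

/* Sketch: even ->dynticks value == CPU is in an extended quiescent state. */
static bool example_in_eqs(struct rcu_dynticks *rdtp)
{
        return (atomic_read(&rdtp->dynticks) & 0x1) == 0;
}
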
@@ -703,7 +698,7 @@ bool rcu_lockdep_current_cpu_online(void)
         if (in_nmi())
                 return 1;
         preempt_disable();
-        rdp = &__get_cpu_var(rcu_sched_data);
+        rdp = this_cpu_ptr(&rcu_sched_data);
         rnp = rdp->mynode;
         ret = (rdp->grpmask & rnp->qsmaskinit) ||
               !rcu_scheduler_fully_active;
@@ -723,7 +718,7 @@ EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
  */
 static int rcu_is_cpu_rrupt_from_idle(void)
 {
-        return __get_cpu_var(rcu_dynticks).dynticks_nesting <= 1;
+        return __this_cpu_read(rcu_dynticks.dynticks_nesting) <= 1;
 }
 
 /*
@@ -802,8 +797,11 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
 
 static void record_gp_stall_check_time(struct rcu_state *rsp)
 {
-        rsp->gp_start = jiffies;
-        rsp->jiffies_stall = jiffies + rcu_jiffies_till_stall_check();
+        unsigned long j = ACCESS_ONCE(jiffies);
+
+        rsp->gp_start = j;
+        smp_wmb(); /* Record start time before stall time. */
+        rsp->jiffies_stall = j + rcu_jiffies_till_stall_check();
 }
 
 /*
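
The smp_wmb() added here pairs with the smp_rmb() calls added to check_cpu_stall() in the next hunk (and rcu_gp_init(), further below, applies the same idea by recording these times before incrementing ->gpnum). A minimal sketch of the publish/snapshot pairing, with illustrative helper names; ACCESS_ONCE(), smp_wmb(), and smp_rmb() are used exactly as in the hunks:

/*
 * Sketch only: publish ->gp_start before ->jiffies_stall, read them
 * back in the opposite order.  A reader that sees the new
 * ->jiffies_stall then also sees the new ->gp_start; the remaining
 * window (new ->gp_start, old ->jiffies_stall) is what the
 * ->completed/->gpnum comparison in check_cpu_stall() rejects.
 */
static void example_publish_stall_times(struct rcu_state *rsp, unsigned long j,
                                        unsigned long delta)
{
        rsp->gp_start = j;
        smp_wmb();      /* Record start time before stall time. */
        rsp->jiffies_stall = j + delta;
}

static void example_snapshot_stall_times(struct rcu_state *rsp,
                                         unsigned long *js, unsigned long *gps)
{
        *js = ACCESS_ONCE(rsp->jiffies_stall);
        smp_rmb();      /* Pairs with the smp_wmb() above. */
        *gps = ACCESS_ONCE(rsp->gp_start);
}
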
@@ -932,17 +930,48 @@ static void print_cpu_stall(struct rcu_state *rsp)
 
 static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
 {
+        unsigned long completed;
+        unsigned long gpnum;
+        unsigned long gps;
         unsigned long j;
         unsigned long js;
         struct rcu_node *rnp;
 
-        if (rcu_cpu_stall_suppress)
+        if (rcu_cpu_stall_suppress || !rcu_gp_in_progress(rsp))
                 return;
         j = ACCESS_ONCE(jiffies);
+
+        /*
+         * Lots of memory barriers to reject false positives.
+         *
+         * The idea is to pick up rsp->gpnum, then rsp->jiffies_stall,
+         * then rsp->gp_start, and finally rsp->completed. These values
+         * are updated in the opposite order with memory barriers (or
+         * equivalent) during grace-period initialization and cleanup.
+         * Now, a false positive can occur if we get a new value of
+         * rsp->gp_start and an old value of rsp->jiffies_stall. But given
+         * the memory barriers, the only way that this can happen is if one
+         * grace period ends and another starts between these two fetches.
+         * Detect this by comparing rsp->completed with the previous fetch
+         * from rsp->gpnum.
+         *
+         * Given this check, comparisons of jiffies, rsp->jiffies_stall,
+         * and rsp->gp_start suffice to forestall false positives.
+         */
+        gpnum = ACCESS_ONCE(rsp->gpnum);
+        smp_rmb(); /* Pick up ->gpnum first... */
         js = ACCESS_ONCE(rsp->jiffies_stall);
+        smp_rmb(); /* ...then ->jiffies_stall before the rest... */
+        gps = ACCESS_ONCE(rsp->gp_start);
+        smp_rmb(); /* ...and finally ->gp_start before ->completed. */
+        completed = ACCESS_ONCE(rsp->completed);
+        if (ULONG_CMP_GE(completed, gpnum) ||
+            ULONG_CMP_LT(j, js) ||
+            ULONG_CMP_GE(gps, js))
+                return; /* No stall or GP completed since entering function. */
         rnp = rdp->mynode;
         if (rcu_gp_in_progress(rsp) &&
-            (ACCESS_ONCE(rnp->qsmask) & rdp->grpmask) && ULONG_CMP_GE(j, js)) {
+            (ACCESS_ONCE(rnp->qsmask) & rdp->grpmask)) {
 
                 /* We haven't checked in, so go dump stack. */
                 print_cpu_stall(rsp);
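
The new early return leans on the kernel's wraparound-safe counter comparisons, ULONG_CMP_GE() and ULONG_CMP_LT(). The standalone program below shows why a plain < would misbehave once jiffies wraps; the macro bodies mirror their definitions in include/linux/rcupdate.h:

#include <limits.h>
#include <stdio.h>

#define ULONG_CMP_GE(a, b)      (ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b)      (ULONG_MAX / 2 < (a) - (b))

int main(void)
{
        unsigned long js = ULONG_MAX - 5;       /* stall deadline just before the counter wraps */
        unsigned long j = js + 10;              /* "now": the counter has wrapped past the deadline */

        printf("naive j < js:        %d  (wrongly claims the deadline is still ahead)\n", j < js);
        printf("ULONG_CMP_LT(j, js): %d  (correctly reports the deadline has passed)\n",
               ULONG_CMP_LT(j, js));
        return 0;
}
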
@@ -1315,9 +1344,10 @@ static int rcu_gp_init(struct rcu_state *rsp)
         }
 
         /* Advance to a new grace period and initialize state. */
+        record_gp_stall_check_time(rsp);
+        smp_wmb(); /* Record GP times before starting GP. */
         rsp->gpnum++;
         trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start"));
-        record_gp_stall_check_time(rsp);
         raw_spin_unlock_irq(&rnp->lock);
 
         /* Exclude any concurrent CPU-hotplug operations. */
@@ -1366,7 +1396,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
 /*
  * Do one round of quiescent-state forcing.
  */
-int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
+static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
 {
         int fqs_state = fqs_state_in;
         bool isidle = false;
@@ -1452,7 +1482,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
         rdp = this_cpu_ptr(rsp->rda);
         rcu_advance_cbs(rsp, rnp, rdp); /* Reduce false positives below. */
         if (cpu_needs_another_gp(rsp, rdp))
-                rsp->gp_flags = 1;
+                rsp->gp_flags = RCU_GP_FLAG_INIT;
         raw_spin_unlock_irq(&rnp->lock);
 }
 
@@ -2725,10 +2755,13 @@ static int rcu_cpu_has_callbacks(int cpu, bool *all_lazy)
 
         for_each_rcu_flavor(rsp) {
                 rdp = per_cpu_ptr(rsp->rda, cpu);
-                if (rdp->qlen != rdp->qlen_lazy)
+                if (!rdp->nxtlist)
+                        continue;
+                hc = true;
+                if (rdp->qlen != rdp->qlen_lazy || !all_lazy) {
                         al = false;
-                if (rdp->nxtlist)
-                        hc = true;
+                        break;
+                }
         }
         if (all_lazy)
                 *all_lazy = al;
@@ -3295,8 +3328,8 @@ void __init rcu_init(void)
 
         rcu_bootup_announce();
         rcu_init_geometry();
-        rcu_init_one(&rcu_sched_state, &rcu_sched_data);
         rcu_init_one(&rcu_bh_state, &rcu_bh_data);
+        rcu_init_one(&rcu_sched_state, &rcu_sched_data);
         __rcu_init_preempt();
         open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
 