@@ -41,6 +41,7 @@
 #include <linux/export.h>
 #include <linux/completion.h>
 #include <linux/moduleparam.h>
+#include <linux/module.h>
 #include <linux/percpu.h>
 #include <linux/notifier.h>
 #include <linux/cpu.h>
@@ -56,17 +57,16 @@
 #include <linux/ftrace_event.h>
 #include <linux/suspend.h>
 
-#include "rcutree.h"
+#include "tree.h"
 #include <trace/events/rcu.h>
 
 #include "rcu.h"
 
-/*
- * Strings used in tracepoints need to be exported via the
- * tracing system such that tools like perf and trace-cmd can
- * translate the string address pointers to actual text.
- */
-#define TPS(x) tracepoint_string(x)
+MODULE_ALIAS("rcutree");
+#ifdef MODULE_PARAM_PREFIX
+#undef MODULE_PARAM_PREFIX
+#endif
+#define MODULE_PARAM_PREFIX "rcutree."
 
 /* Data structures. */
 
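The MODULE_PARAM_PREFIX override above preserves boot-parameter names across the rcutree.c to tree.c rename: for built-in code the prefix is normally derived from KBUILD_MODNAME, so without the override a parameter that used to be set as "rcutree.foo=" would silently become "tree.foo=". A minimal sketch of the mechanism (illustration, not part of the patch; my_param is a hypothetical name, though tree.c does carry real parameters of this form):

#include <linux/module.h>
#include <linux/moduleparam.h>

/* Force "rcutree." as the parameter prefix, exactly as the patch does. */
#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "rcutree."

static long my_param = 10;		/* illustrative parameter */
module_param(my_param, long, 0444);	/* settable as rcutree.my_param=N */

The MODULE_ALIAS("rcutree") serves the same compatibility goal for anything still referring to the code by its old name.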
@@ -222,7 +222,7 @@ void rcu_note_context_switch(int cpu)
 }
 EXPORT_SYMBOL_GPL(rcu_note_context_switch);
 
-DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
+static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
 	.dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
 	.dynticks = ATOMIC_INIT(1),
 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
@@ -371,7 +371,8 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
 {
 	trace_rcu_dyntick(TPS("Start"), oldval, rdtp->dynticks_nesting);
 	if (!user && !is_idle_task(current)) {
-		struct task_struct *idle = idle_task(smp_processor_id());
+		struct task_struct *idle __maybe_unused =
+			idle_task(smp_processor_id());
 
 		trace_rcu_dyntick(TPS("Error on entry: not idle task"), oldval, 0);
 		ftrace_dump(DUMP_ORIG);
@@ -407,7 +408,7 @@ static void rcu_eqs_enter(bool user)
 	long long oldval;
 	struct rcu_dynticks *rdtp;
 
-	rdtp = &__get_cpu_var(rcu_dynticks);
+	rdtp = this_cpu_ptr(&rcu_dynticks);
 	oldval = rdtp->dynticks_nesting;
 	WARN_ON_ONCE((oldval & DYNTICK_TASK_NEST_MASK) == 0);
 	if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE)
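The &__get_cpu_var(pcp) to this_cpu_ptr(&pcp) conversions that recur throughout this patch are mechanical: __get_cpu_var() evaluates to the current CPU's instance of a per-CPU variable as an lvalue (so callers wanting a pointer took its address), while this_cpu_ptr() is handed the address of the variable and returns the per-CPU pointer directly. A hedged sketch of the two styles, using a hypothetical per-CPU structure (not part of the patch):

#include <linux/percpu.h>

struct my_stats {
	unsigned long events;
};
static DEFINE_PER_CPU(struct my_stats, my_stats);	/* hypothetical */

static void count_event(void)	/* caller must have preemption disabled */
{
	struct my_stats *sp;

	sp = &__get_cpu_var(my_stats);	/* old: address of this CPU's lvalue */
	sp = this_cpu_ptr(&my_stats);	/* new: pointer computed directly */
	sp->events++;
}

__this_cpu_read(), which appears in rcu_is_cpu_rrupt_from_idle() further down, goes one step further and reads a scalar member without ever forming the pointer.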
@@ -435,7 +436,7 @@ void rcu_idle_enter(void)
 
 	local_irq_save(flags);
 	rcu_eqs_enter(false);
-	rcu_sysidle_enter(&__get_cpu_var(rcu_dynticks), 0);
+	rcu_sysidle_enter(this_cpu_ptr(&rcu_dynticks), 0);
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(rcu_idle_enter);
@@ -478,7 +479,7 @@ void rcu_irq_exit(void)
 	struct rcu_dynticks *rdtp;
 
 	local_irq_save(flags);
-	rdtp = &__get_cpu_var(rcu_dynticks);
+	rdtp = this_cpu_ptr(&rcu_dynticks);
 	oldval = rdtp->dynticks_nesting;
 	rdtp->dynticks_nesting--;
 	WARN_ON_ONCE(rdtp->dynticks_nesting < 0);
@@ -508,7 +509,8 @@ static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
 	rcu_cleanup_after_idle(smp_processor_id());
 	trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
 	if (!user && !is_idle_task(current)) {
-		struct task_struct *idle = idle_task(smp_processor_id());
+		struct task_struct *idle __maybe_unused =
+			idle_task(smp_processor_id());
 
 		trace_rcu_dyntick(TPS("Error on exit: not idle task"),
 				  oldval, rdtp->dynticks_nesting);
@@ -528,7 +530,7 @@ static void rcu_eqs_exit(bool user)
 	struct rcu_dynticks *rdtp;
 	long long oldval;
 
-	rdtp = &__get_cpu_var(rcu_dynticks);
+	rdtp = this_cpu_ptr(&rcu_dynticks);
 	oldval = rdtp->dynticks_nesting;
 	WARN_ON_ONCE(oldval < 0);
 	if (oldval & DYNTICK_TASK_NEST_MASK)
@@ -555,7 +557,7 @@ void rcu_idle_exit(void)
 
 	local_irq_save(flags);
 	rcu_eqs_exit(false);
-	rcu_sysidle_exit(&__get_cpu_var(rcu_dynticks), 0);
+	rcu_sysidle_exit(this_cpu_ptr(&rcu_dynticks), 0);
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(rcu_idle_exit);
@@ -599,7 +601,7 @@ void rcu_irq_enter(void)
 	long long oldval;
 
 	local_irq_save(flags);
-	rdtp = &__get_cpu_var(rcu_dynticks);
+	rdtp = this_cpu_ptr(&rcu_dynticks);
 	oldval = rdtp->dynticks_nesting;
 	rdtp->dynticks_nesting++;
 	WARN_ON_ONCE(rdtp->dynticks_nesting == 0);
@@ -620,7 +622,7 @@ void rcu_irq_enter(void)
  */
 void rcu_nmi_enter(void)
 {
-	struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
+	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 
 	if (rdtp->dynticks_nmi_nesting == 0 &&
 	    (atomic_read(&rdtp->dynticks) & 0x1))
@@ -642,7 +644,7 @@ void rcu_nmi_enter(void)
  */
 void rcu_nmi_exit(void)
 {
-	struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
+	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 
 	if (rdtp->dynticks_nmi_nesting == 0 ||
 	    --rdtp->dynticks_nmi_nesting != 0)
@@ -655,21 +657,34 @@ void rcu_nmi_exit(void)
 }
 
 /**
- * rcu_is_cpu_idle - see if RCU thinks that the current CPU is idle
+ * __rcu_is_watching - are RCU read-side critical sections safe?
+ *
+ * Return true if RCU is watching the running CPU, which means that
+ * this CPU can safely enter RCU read-side critical sections.  Unlike
+ * rcu_is_watching(), the caller of __rcu_is_watching() must have at
+ * least disabled preemption.
+ */
+bool __rcu_is_watching(void)
+{
+	return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
+}
+
+/**
+ * rcu_is_watching - see if RCU thinks that the current CPU is not idle
  *
- * If the current CPU is in its idle loop and is neither in an interrupt
- * or NMI handler, return true.
+ * If the current CPU is in its idle loop and is neither in an interrupt
+ * nor an NMI handler, return false; otherwise return true.
  */
-int rcu_is_cpu_idle(void)
+bool rcu_is_watching(void)
 {
 	int ret;
 
 	preempt_disable();
-	ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
+	ret = __rcu_is_watching();
 	preempt_enable();
 	return ret;
 }
-EXPORT_SYMBOL(rcu_is_cpu_idle);
+EXPORT_SYMBOL_GPL(rcu_is_watching);
 
 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
 
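rcu_is_watching() inverts the sense of the old rcu_is_cpu_idle(): it answers the question callers actually ask, namely "may this CPU legally enter an RCU read-side critical section right now?". A hedged sketch of the intended calling convention (the handler is hypothetical, not part of the patch):

#include <linux/rcupdate.h>

/* Hypothetical event hook that can be reached from the idle loop. */
static void my_event_hook(void)
{
	if (!rcu_is_watching())
		return;		/* RCU is ignoring this CPU; don't read. */

	rcu_read_lock();
	/* ... access rcu_dereference()-protected data here ... */
	rcu_read_unlock();
}

Code that already runs with preemption disabled can call __rcu_is_watching() directly and skip the preempt_disable()/preempt_enable() pair.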
@@ -703,7 +718,7 @@ bool rcu_lockdep_current_cpu_online(void)
 	if (in_nmi())
 		return 1;
 	preempt_disable();
-	rdp = &__get_cpu_var(rcu_sched_data);
+	rdp = this_cpu_ptr(&rcu_sched_data);
 	rnp = rdp->mynode;
 	ret = (rdp->grpmask & rnp->qsmaskinit) ||
 	      !rcu_scheduler_fully_active;
@@ -723,7 +738,7 @@ EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
  */
 static int rcu_is_cpu_rrupt_from_idle(void)
 {
-	return __get_cpu_var(rcu_dynticks).dynticks_nesting <= 1;
+	return __this_cpu_read(rcu_dynticks.dynticks_nesting) <= 1;
 }
 
 /*
@@ -802,8 +817,11 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
 
 static void record_gp_stall_check_time(struct rcu_state *rsp)
 {
-	rsp->gp_start = jiffies;
-	rsp->jiffies_stall = jiffies + rcu_jiffies_till_stall_check();
+	unsigned long j = ACCESS_ONCE(jiffies);
+
+	rsp->gp_start = j;
+	smp_wmb(); /* Record start time before stall time. */
+	rsp->jiffies_stall = j + rcu_jiffies_till_stall_check();
 }
 
 /*
@@ -932,17 +950,48 @@ static void print_cpu_stall(struct rcu_state *rsp)
 
 static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
 {
+	unsigned long completed;
+	unsigned long gpnum;
+	unsigned long gps;
 	unsigned long j;
 	unsigned long js;
 	struct rcu_node *rnp;
 
-	if (rcu_cpu_stall_suppress)
+	if (rcu_cpu_stall_suppress || !rcu_gp_in_progress(rsp))
 		return;
 	j = ACCESS_ONCE(jiffies);
+
+	/*
+	 * Lots of memory barriers to reject false positives.
+	 *
+	 * The idea is to pick up rsp->gpnum, then rsp->jiffies_stall,
+	 * then rsp->gp_start, and finally rsp->completed.  These values
+	 * are updated in the opposite order with memory barriers (or
+	 * equivalent) during grace-period initialization and cleanup.
+	 * Now, a false positive can occur if we get a new value of
+	 * rsp->gp_start and an old value of rsp->jiffies_stall.  But given
+	 * the memory barriers, the only way that this can happen is if one
+	 * grace period ends and another starts between these two fetches.
+	 * Detect this by comparing rsp->completed with the previous fetch
+	 * from rsp->gpnum.
+	 *
+	 * Given this check, comparisons of jiffies, rsp->jiffies_stall,
+	 * and rsp->gp_start suffice to forestall false positives.
+	 */
+	gpnum = ACCESS_ONCE(rsp->gpnum);
+	smp_rmb(); /* Pick up ->gpnum first... */
 	js = ACCESS_ONCE(rsp->jiffies_stall);
+	smp_rmb(); /* ...then ->jiffies_stall before the rest... */
+	gps = ACCESS_ONCE(rsp->gp_start);
+	smp_rmb(); /* ...and finally ->gp_start before ->completed. */
+	completed = ACCESS_ONCE(rsp->completed);
+	if (ULONG_CMP_GE(completed, gpnum) ||
+	    ULONG_CMP_LT(j, js) ||
+	    ULONG_CMP_GE(gps, js))
+		return; /* No stall or GP completed since entering function. */
 	rnp = rdp->mynode;
 	if (rcu_gp_in_progress(rsp) &&
-	    (ACCESS_ONCE(rnp->qsmask) & rdp->grpmask) && ULONG_CMP_GE(j, js)) {
+	    (ACCESS_ONCE(rnp->qsmask) & rdp->grpmask)) {
 
 		/* We haven't checked in, so go dump stack. */
 		print_cpu_stall(rsp);
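Two idioms in this hunk deserve a note. The smp_rmb() chain pairs with the smp_wmb() calls in record_gp_stall_check_time() and in the grace-period code below: writers publish ->gp_start before ->jiffies_stall, readers fetch in the reverse order, so a reader that sees a fresh ->jiffies_stall is guaranteed a fresh ->gp_start. The ULONG_CMP_GE()/ULONG_CMP_LT() macros compare wrapping counters such as jiffies. A sketch of the latter, with the macro bodies reproduced in their conventional form (check rcu.h for the authoritative definitions):

/* Wraparound-safe comparisons, as conventionally defined: */
#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b)	(ULONG_MAX / 2 < (a) - (b))

/*
 * Worked example: if jiffies has wrapped so that j == 5 while
 * js == ULONG_MAX - 10, then (j - js) == 16 in unsigned arithmetic,
 * which is well below ULONG_MAX / 2, so ULONG_CMP_GE(j, js) is true:
 * j is correctly treated as "at or after" js despite being numerically
 * smaller.  A plain j >= js comparison would get this wrong.
 */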
@@ -1297,7 +1346,7 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
 }
 
 /*
- * Initialize a new grace period.
+ * Initialize a new grace period.  Return 0 if no grace period required.
  */
 static int rcu_gp_init(struct rcu_state *rsp)
 {
@@ -1306,18 +1355,27 @@ static int rcu_gp_init(struct rcu_state *rsp)
 
 	rcu_bind_gp_kthread();
 	raw_spin_lock_irq(&rnp->lock);
+	if (rsp->gp_flags == 0) {
+		/* Spurious wakeup, tell caller to go back to sleep. */
+		raw_spin_unlock_irq(&rnp->lock);
+		return 0;
+	}
 	rsp->gp_flags = 0; /* Clear all flags: New grace period. */
 
-	if (rcu_gp_in_progress(rsp)) {
-		/* Grace period already in progress, don't start another. */
+	if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) {
+		/*
+		 * Grace period already in progress, don't start another.
+		 * Not supposed to be able to happen.
+		 */
 		raw_spin_unlock_irq(&rnp->lock);
 		return 0;
 	}
 
 	/* Advance to a new grace period and initialize state. */
+	record_gp_stall_check_time(rsp);
+	smp_wmb(); /* Record GP times before starting GP. */
 	rsp->gpnum++;
 	trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start"));
-	record_gp_stall_check_time(rsp);
 	raw_spin_unlock_irq(&rnp->lock);
 
 	/* Exclude any concurrent CPU-hotplug operations. */
@@ -1366,7 +1424,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
 /*
  * Do one round of quiescent-state forcing.
  */
-int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
+static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
 {
 	int fqs_state = fqs_state_in;
 	bool isidle = false;
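The gp_flags == 0 recheck at the top of rcu_gp_init() handles spurious wakeups: the grace-period kthread's wait can return without a start request having been posted, so the flag is re-examined under ->lock and the caller is told to go back to sleep if there is nothing to do. The general shape of the pattern, as a sketch with illustrative names (not part of the patch):

struct work_state {
	raw_spinlock_t lock;
	int flags;			/* nonzero: work was requested */
};

/* Returns 1 if a request was consumed, 0 on a spurious wakeup. */
static int try_claim_request(struct work_state *ws)
{
	raw_spin_lock_irq(&ws->lock);
	if (ws->flags == 0) {
		/* Spurious wakeup: nobody actually asked for work. */
		raw_spin_unlock_irq(&ws->lock);
		return 0;
	}
	ws->flags = 0;			/* Consume the request. */
	raw_spin_unlock_irq(&ws->lock);
	return 1;
}

This also lets the caller's wait loop drop its own racy pre-check of rsp->gp_flags, which is exactly what the rcu_gp_kthread() hunk below does.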
@@ -1451,8 +1509,12 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 	rsp->fqs_state = RCU_GP_IDLE;
 	rdp = this_cpu_ptr(rsp->rda);
 	rcu_advance_cbs(rsp, rnp, rdp); /* Reduce false positives below. */
-	if (cpu_needs_another_gp(rsp, rdp))
-		rsp->gp_flags = 1;
+	if (cpu_needs_another_gp(rsp, rdp)) {
+		rsp->gp_flags = RCU_GP_FLAG_INIT;
+		trace_rcu_grace_period(rsp->name,
+				       ACCESS_ONCE(rsp->gpnum),
+				       TPS("newreq"));
+	}
 	raw_spin_unlock_irq(&rnp->lock);
 }
 
@@ -1462,6 +1524,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 static int __noreturn rcu_gp_kthread(void *arg)
 {
 	int fqs_state;
+	int gf;
 	unsigned long j;
 	int ret;
 	struct rcu_state *rsp = arg;
@@ -1471,14 +1534,19 @@ static int __noreturn rcu_gp_kthread(void *arg)
 
 	/* Handle grace-period start. */
 	for (;;) {
+		trace_rcu_grace_period(rsp->name,
+				       ACCESS_ONCE(rsp->gpnum),
+				       TPS("reqwait"));
 		wait_event_interruptible(rsp->gp_wq,
-					 rsp->gp_flags &
+					 ACCESS_ONCE(rsp->gp_flags) &
 					 RCU_GP_FLAG_INIT);
-		if ((rsp->gp_flags & RCU_GP_FLAG_INIT) &&
-		    rcu_gp_init(rsp))
+		if (rcu_gp_init(rsp))
 			break;
 		cond_resched();
 		flush_signals(current);
+		trace_rcu_grace_period(rsp->name,
+				       ACCESS_ONCE(rsp->gpnum),
+				       TPS("reqwaitsig"));
 	}
 
 	/* Handle quiescent-state forcing. */
@@ -1488,10 +1556,16 @@ static int __noreturn rcu_gp_kthread(void *arg)
 			j = HZ;
 			jiffies_till_first_fqs = HZ;
 		}
+		ret = 0;
 		for (;;) {
-			rsp->jiffies_force_qs = jiffies + j;
+			if (!ret)
+				rsp->jiffies_force_qs = jiffies + j;
+			trace_rcu_grace_period(rsp->name,
+					       ACCESS_ONCE(rsp->gpnum),
+					       TPS("fqswait"));
 			ret = wait_event_interruptible_timeout(rsp->gp_wq,
-					(rsp->gp_flags & RCU_GP_FLAG_FQS) ||
+					((gf = ACCESS_ONCE(rsp->gp_flags)) &
+					 RCU_GP_FLAG_FQS) ||
 					(!ACCESS_ONCE(rnp->qsmask) &&
 					 !rcu_preempt_blocked_readers_cgp(rnp)),
 					j);
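The ACCESS_ONCE() wrappers added to these wait conditions matter because the condition expression is evaluated repeatedly and locklessly by the wait_event machinery: ACCESS_ONCE() (a volatile cast) forces a fresh load of ->gp_flags on every evaluation, and the gf = assignment captures the exact value that was tested so later code acts on what was actually seen rather than on a possibly-changed refetch. The underlying idiom, sketched with illustrative names (not part of the patch):

#include <linux/compiler.h>	/* ACCESS_ONCE() */
#include <asm/processor.h>	/* cpu_relax() */

#define WORK_PENDING	0x1

struct shared_state {
	unsigned long flags;		/* written by other CPUs */
};

static void wait_for_work(struct shared_state *shared)
{
	/*
	 * Broken: the compiler may load shared->flags once, keep it in
	 * a register, and spin forever on the stale value:
	 *
	 *	while (!(shared->flags & WORK_PENDING))
	 *		cpu_relax();
	 */

	/* Correct: ACCESS_ONCE() forces one real load per iteration. */
	while (!(ACCESS_ONCE(shared->flags) & WORK_PENDING))
		cpu_relax();
}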
@@ -1500,13 +1574,23 @@ static int __noreturn rcu_gp_kthread(void *arg)
 			    !rcu_preempt_blocked_readers_cgp(rnp))
 				break;
 			/* If time for quiescent-state forcing, do it. */
-			if (ret == 0 || (rsp->gp_flags & RCU_GP_FLAG_FQS)) {
+			if (ULONG_CMP_GE(jiffies, rsp->jiffies_force_qs) ||
+			    (gf & RCU_GP_FLAG_FQS)) {
+				trace_rcu_grace_period(rsp->name,
+						       ACCESS_ONCE(rsp->gpnum),
+						       TPS("fqsstart"));
 				fqs_state = rcu_gp_fqs(rsp, fqs_state);
+				trace_rcu_grace_period(rsp->name,
+						       ACCESS_ONCE(rsp->gpnum),
+						       TPS("fqsend"));
 				cond_resched();
 			} else {
 				/* Deal with stray signal. */
 				cond_resched();
 				flush_signals(current);
+				trace_rcu_grace_period(rsp->name,
+						       ACCESS_ONCE(rsp->gpnum),
+						       TPS("fqswaitsig"));
 			}
 			j = jiffies_till_next_fqs;
 			if (j > HZ) {
@@ -1554,6 +1638,8 @@ rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
 		return;
 	}
 	rsp->gp_flags = RCU_GP_FLAG_INIT;
+	trace_rcu_grace_period(rsp->name, ACCESS_ONCE(rsp->gpnum),
+			       TPS("newreq"));
 
 	/*
 	 * We can't do wakeups while holding the rnp->lock, as that
@@ -2255,7 +2341,7 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
 	 * If called from an extended quiescent state, invoke the RCU
 	 * core in order to force a re-evaluation of RCU's idleness.
 	 */
-	if (rcu_is_cpu_idle() && cpu_online(smp_processor_id()))
+	if (!rcu_is_watching() && cpu_online(smp_processor_id()))
 		invoke_rcu_core();
 
 	/* If interrupts were disabled or CPU offline, don't invoke RCU core. */
@@ -2725,10 +2811,13 @@ static int rcu_cpu_has_callbacks(int cpu, bool *all_lazy)
 
 	for_each_rcu_flavor(rsp) {
 		rdp = per_cpu_ptr(rsp->rda, cpu);
-		if (rdp->qlen != rdp->qlen_lazy)
+		if (!rdp->nxtlist)
+			continue;
+		hc = true;
+		if (rdp->qlen != rdp->qlen_lazy || !all_lazy) {
 			al = false;
-		if (rdp->nxtlist)
-			hc = true;
+			break;
+		}
 	}
 	if (all_lazy)
 		*all_lazy = al;
@@ -3216,7 +3305,7 @@ static void __init rcu_init_one(struct rcu_state *rsp,
 
 /*
  * Compute the rcu_node tree geometry from kernel parameters.  This cannot
- * replace the definitions in rcutree.h because those are needed to size
+ * replace the definitions in tree.h because those are needed to size
  * the ->node array in the rcu_state structure.
  */
 static void __init rcu_init_geometry(void)
@@ -3295,8 +3384,8 @@ void __init rcu_init(void)
 
 	rcu_bootup_announce();
 	rcu_init_geometry();
-	rcu_init_one(&rcu_sched_state, &rcu_sched_data);
 	rcu_init_one(&rcu_bh_state, &rcu_bh_data);
+	rcu_init_one(&rcu_sched_state, &rcu_sched_data);
 	__rcu_init_preempt();
 	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
 
@@ -3311,4 +3400,4 @@ void __init rcu_init(void)
 		rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
 }
 
-#include "rcutree_plugin.h"
+#include "tree_plugin.h"