@@ -206,6 +206,9 @@ EXPORT_SYMBOL_GPL(rcu_note_context_switch);
 DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
 	.dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
 	.dynticks = ATOMIC_INIT(1),
+#if defined(CONFIG_RCU_USER_QS) && !defined(CONFIG_RCU_USER_QS_FORCE)
+	.ignore_user_qs = true,
+#endif
 };
 
 static int blimit = 10;		/* Maximum callbacks per rcu_do_batch. */
@@ -322,16 +325,17 @@ static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
 }
 
 /*
- * rcu_idle_enter_common - inform RCU that current CPU is moving towards idle
+ * rcu_eqs_enter_common - current CPU is moving towards extended quiescent state
  *
  * If the new value of the ->dynticks_nesting counter now is zero,
  * we really have entered idle, and must do the appropriate accounting.
  * The caller must have disabled interrupts.
  */
-static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
+static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
+				 bool user)
 {
 	trace_rcu_dyntick("Start", oldval, 0);
-	if (!is_idle_task(current)) {
+	if (!user && !is_idle_task(current)) {
 		struct task_struct *idle = idle_task(smp_processor_id());
 
 		trace_rcu_dyntick("Error on entry: not idle task", oldval, 0);
@@ -348,7 +352,7 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
 	WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
 
 	/*
-	 * The idle task is not permitted to enter the idle loop while
+	 * It is illegal to enter an extended quiescent state while
 	 * in an RCU read-side critical section.
 	 */
 	rcu_lockdep_assert(!lock_is_held(&rcu_lock_map),
@@ -359,6 +363,25 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
 			   "Illegal idle entry in RCU-sched read-side critical section.");
 }
 
+/*
+ * Enter an RCU extended quiescent state, which can be either the
+ * idle loop or adaptive-tickless usermode execution.
+ */
+static void rcu_eqs_enter(bool user)
+{
+	long long oldval;
+	struct rcu_dynticks *rdtp;
+
+	rdtp = &__get_cpu_var(rcu_dynticks);
+	oldval = rdtp->dynticks_nesting;
+	WARN_ON_ONCE((oldval & DYNTICK_TASK_NEST_MASK) == 0);
+	if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE)
+		rdtp->dynticks_nesting = 0;
+	else
+		rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
+	rcu_eqs_enter_common(rdtp, oldval, user);
+}
+
 /**
  * rcu_idle_enter - inform RCU that current CPU is entering idle
  *
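The new rcu_eqs_enter() above (and its rcu_eqs_exit() counterpart later in this patch) keeps the task-level nesting count in the upper bits of ->dynticks_nesting and drops the counter to zero only on the outermost transition into an extended quiescent state. The stand-alone model below sketches that bookkeeping; the NEST_VALUE/NEST_MASK/EXIT_IDLE constants are simplified stand-ins, not the kernel's DYNTICK_TASK_* definitions.

/* Editorial sketch, not part of the patch: a user-space model of the
 * ->dynticks_nesting arithmetic used by rcu_eqs_enter()/rcu_eqs_exit().
 * The constants are simplified stand-ins for DYNTICK_TASK_*. */
#include <assert.h>
#include <stdio.h>

#define NEST_VALUE	(1LL << 16)		/* one task-level nesting step */
#define NEST_MASK	(~(NEST_VALUE - 1))	/* upper bits hold the nest count */
#define EXIT_IDLE	(NEST_VALUE + 1)	/* "running in the kernel" value */

static long long nesting = EXIT_IDLE;

static void eqs_enter(void)			/* models rcu_eqs_enter() */
{
	if ((nesting & NEST_MASK) == NEST_VALUE)
		nesting = 0;			/* outermost: really idle/user */
	else
		nesting -= NEST_VALUE;
}

static void eqs_exit(void)			/* models rcu_eqs_exit() */
{
	if (nesting & NEST_MASK)
		nesting += NEST_VALUE;
	else
		nesting = EXIT_IDLE;		/* outermost: back in the kernel */
}

int main(void)
{
	eqs_enter();				/* e.g. rcu_idle_enter() */
	assert(nesting == 0);			/* extended quiescent state */
	eqs_exit();				/* e.g. rcu_idle_exit() */
	assert(nesting == EXIT_IDLE);
	printf("nesting model OK\n");
	return 0;
}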
@@ -374,21 +397,70 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
 void rcu_idle_enter(void)
 {
 	unsigned long flags;
-	long long oldval;
+
+	local_irq_save(flags);
+	rcu_eqs_enter(false);
+	local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(rcu_idle_enter);
+
+#ifdef CONFIG_RCU_USER_QS
+/**
+ * rcu_user_enter - inform RCU that we are resuming userspace.
+ *
+ * Enter RCU idle mode right before resuming userspace. No use of RCU
+ * is permitted between this call and rcu_user_exit(). This way the
+ * CPU doesn't need to maintain the tick for RCU maintenance purposes
+ * when the CPU runs in userspace.
+ */
+void rcu_user_enter(void)
+{
+	unsigned long flags;
 	struct rcu_dynticks *rdtp;
 
+	/*
+	 * Some contexts may involve an exception occuring in an irq,
+	 * leading to that nesting:
+	 * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
+	 * This would mess up the dyntick_nesting count though. And rcu_irq_*()
+	 * helpers are enough to protect RCU uses inside the exception. So
+	 * just return immediately if we detect we are in an IRQ.
+	 */
+	if (in_interrupt())
+		return;
+
+	WARN_ON_ONCE(!current->mm);
+
 	local_irq_save(flags);
 	rdtp = &__get_cpu_var(rcu_dynticks);
-	oldval = rdtp->dynticks_nesting;
-	WARN_ON_ONCE((oldval & DYNTICK_TASK_NEST_MASK) == 0);
-	if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE)
-		rdtp->dynticks_nesting = 0;
-	else
-		rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
-	rcu_idle_enter_common(rdtp, oldval);
+	if (!rdtp->ignore_user_qs && !rdtp->in_user) {
+		rdtp->in_user = true;
+		rcu_eqs_enter(true);
+	}
 	local_irq_restore(flags);
 }
-EXPORT_SYMBOL_GPL(rcu_idle_enter);
+
+/**
+ * rcu_user_enter_after_irq - inform RCU that we are going to resume userspace
+ * after the current irq returns.
+ *
+ * This is similar to rcu_user_enter() but in the context of a non-nesting
+ * irq. After this call, RCU enters into idle mode when the interrupt
+ * returns.
+ */
+void rcu_user_enter_after_irq(void)
+{
+	unsigned long flags;
+	struct rcu_dynticks *rdtp;
+
+	local_irq_save(flags);
+	rdtp = &__get_cpu_var(rcu_dynticks);
+	/* Ensure this irq is interrupting a non-idle RCU state. */
+	WARN_ON_ONCE(!(rdtp->dynticks_nesting & DYNTICK_TASK_MASK));
+	rdtp->dynticks_nesting = 1;
+	local_irq_restore(flags);
+}
+#endif /* CONFIG_RCU_USER_QS */
 
 /**
  * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
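The rcu_user_enter()/rcu_user_exit() pair introduced here is meant to be driven from the architecture's user entry/exit paths. The sketch below shows one way such wiring could look; it is illustrative only, the hook names are placeholders, and the wider series actually routes this through TIF_NOHZ and the syscall slow path rather than direct calls like these.

/* Editorial sketch, not part of the patch: hypothetical arch-level hooks
 * bracketing userspace execution with the new RCU user-QS API. */
#include <linux/rcupdate.h>	/* rcu_user_enter(), rcu_user_exit() (this series) */

static void hypothetical_syscall_entry(void)
{
	rcu_user_exit();	/* back in the kernel: RCU may be used again */
	/* ... dispatch the system call ... */
}

static void hypothetical_return_to_user(void)
{
	/* No RCU read-side critical section is legal past this point. */
	rcu_user_enter();	/* CPU enters an RCU extended quiescent state */
}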
@@ -420,18 +492,19 @@ void rcu_irq_exit(void)
 	if (rdtp->dynticks_nesting)
 		trace_rcu_dyntick("--=", oldval, rdtp->dynticks_nesting);
 	else
-		rcu_idle_enter_common(rdtp, oldval);
+		rcu_eqs_enter_common(rdtp, oldval, true);
 	local_irq_restore(flags);
 }
 
 /*
- * rcu_idle_exit_common - inform RCU that current CPU is moving away from idle
+ * rcu_eqs_exit_common - current CPU moving away from extended quiescent state
  *
  * If the new value of the ->dynticks_nesting counter was previously zero,
  * we really have exited idle, and must do the appropriate accounting.
  * The caller must have disabled interrupts.
  */
-static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
+static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
+				int user)
 {
 	smp_mb__before_atomic_inc();  /* Force ordering w/previous sojourn. */
 	atomic_inc(&rdtp->dynticks);
@@ -440,7 +513,7 @@ static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
 	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
 	rcu_cleanup_after_idle(smp_processor_id());
 	trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
-	if (!is_idle_task(current)) {
+	if (!user && !is_idle_task(current)) {
 		struct task_struct *idle = idle_task(smp_processor_id());
 
 		trace_rcu_dyntick("Error on exit: not idle task",
@@ -452,6 +525,25 @@ static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
 	}
 }
 
+/*
+ * Exit an RCU extended quiescent state, which can be either the
+ * idle loop or adaptive-tickless usermode execution.
+ */
+static void rcu_eqs_exit(bool user)
+{
+	struct rcu_dynticks *rdtp;
+	long long oldval;
+
+	rdtp = &__get_cpu_var(rcu_dynticks);
+	oldval = rdtp->dynticks_nesting;
+	WARN_ON_ONCE(oldval < 0);
+	if (oldval & DYNTICK_TASK_NEST_MASK)
+		rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
+	else
+		rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
+	rcu_eqs_exit_common(rdtp, oldval, user);
+}
+
 /**
  * rcu_idle_exit - inform RCU that current CPU is leaving idle
  *
@@ -464,23 +556,69 @@ static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
  * now starting.
  */
 void rcu_idle_exit(void)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	rcu_eqs_exit(false);
+	local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(rcu_idle_exit);
+
+#ifdef CONFIG_RCU_USER_QS
+/**
+ * rcu_user_exit - inform RCU that we are exiting userspace.
+ *
+ * Exit RCU idle mode while entering the kernel because it can
+ * run a RCU read side critical section anytime.
+ */
+void rcu_user_exit(void)
 {
 	unsigned long flags;
 	struct rcu_dynticks *rdtp;
-	long long oldval;
+
+	/*
+	 * Some contexts may involve an exception occuring in an irq,
+	 * leading to that nesting:
+	 * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
+	 * This would mess up the dyntick_nesting count though. And rcu_irq_*()
+	 * helpers are enough to protect RCU uses inside the exception. So
+	 * just return immediately if we detect we are in an IRQ.
+	 */
+	if (in_interrupt())
+		return;
 
 	local_irq_save(flags);
 	rdtp = &__get_cpu_var(rcu_dynticks);
-	oldval = rdtp->dynticks_nesting;
-	WARN_ON_ONCE(oldval < 0);
-	if (oldval & DYNTICK_TASK_NEST_MASK)
-		rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
-	else
-		rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
-	rcu_idle_exit_common(rdtp, oldval);
+	if (rdtp->in_user) {
+		rdtp->in_user = false;
+		rcu_eqs_exit(true);
+	}
 	local_irq_restore(flags);
 }
-EXPORT_SYMBOL_GPL(rcu_idle_exit);
+
+/**
+ * rcu_user_exit_after_irq - inform RCU that we won't resume to userspace
+ * idle mode after the current non-nesting irq returns.
+ *
+ * This is similar to rcu_user_exit() but in the context of an irq.
+ * This is called when the irq has interrupted a userspace RCU idle mode
+ * context. When the current non-nesting interrupt returns after this call,
+ * the CPU won't restore the RCU idle mode.
+ */
+void rcu_user_exit_after_irq(void)
+{
+	unsigned long flags;
+	struct rcu_dynticks *rdtp;
+
+	local_irq_save(flags);
+	rdtp = &__get_cpu_var(rcu_dynticks);
+	/* Ensure we are interrupting an RCU idle mode. */
+	WARN_ON_ONCE(rdtp->dynticks_nesting & DYNTICK_TASK_NEST_MASK);
+	rdtp->dynticks_nesting += DYNTICK_TASK_EXIT_IDLE;
+	local_irq_restore(flags);
+}
+#endif /* CONFIG_RCU_USER_QS */
 
 /**
  * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
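The *_after_irq() variants exist for non-nesting interrupts that fire while the CPU sits in userspace RCU-idle mode and that need to change what happens when the irq returns. A hedged sketch of the intended pattern follows; the handler name is a placeholder, and the ordering relative to rcu_irq_enter()/rcu_irq_exit() is inferred from the nesting-counter manipulation above rather than stated by the patch itself.

/* Editorial sketch, not part of the patch: a non-nesting interrupt taken
 * from userspace RCU-idle mode.  handle_user_irq() is a placeholder. */
#include <linux/rcupdate.h>
#include <linux/types.h>

static void handle_user_irq(bool will_resume_userspace)
{
	rcu_irq_enter();	/* leave the extended QS for the irq's duration */

	if (!will_resume_userspace) {
		/*
		 * The irq return path will stay in the kernel, so prevent
		 * RCU from falling back into user idle mode on irq exit.
		 */
		rcu_user_exit_after_irq();
	}

	/* ... irq work; rcu_read_lock()/rcu_read_unlock() allowed here ... */

	rcu_irq_exit();		/* re-enter the extended QS unless overridden above */
}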
@@ -515,7 +653,7 @@ void rcu_irq_enter(void)
 	if (oldval)
 		trace_rcu_dyntick("++=", oldval, rdtp->dynticks_nesting);
 	else
-		rcu_idle_exit_common(rdtp, oldval);
+		rcu_eqs_exit_common(rdtp, oldval, true);
 	local_irq_restore(flags);
 }
 
@@ -579,6 +717,21 @@ int rcu_is_cpu_idle(void)
 }
 EXPORT_SYMBOL(rcu_is_cpu_idle);
 
+#ifdef CONFIG_RCU_USER_QS
+void rcu_user_hooks_switch(struct task_struct *prev,
+			   struct task_struct *next)
+{
+	struct rcu_dynticks *rdtp;
+
+	/* Interrupts are disabled in context switch */
+	rdtp = &__get_cpu_var(rcu_dynticks);
+	if (!rdtp->ignore_user_qs) {
+		clear_tsk_thread_flag(prev, TIF_NOHZ);
+		set_tsk_thread_flag(next, TIF_NOHZ);
+	}
+}
+#endif /* #ifdef CONFIG_RCU_USER_QS */
+
 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
 
 /*
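rcu_user_hooks_switch() hands the TIF_NOHZ flag from the outgoing task to the incoming one so that only the currently running task takes the user-QS slow path. In the wider series this is invoked from a small helper on the scheduler's context-switch path; the sketch below shows that shape, with the exact call site treated as an assumption here.

/* Editorial sketch, not part of the patch: calling the new hook from the
 * context-switch path (the series uses an rcu_switch() helper of this shape;
 * the placement shown here is illustrative). */
#include <linux/sched.h>
#include <linux/rcupdate.h>

static inline void rcu_switch(struct task_struct *prev,
			      struct task_struct *next)
{
#ifdef CONFIG_RCU_USER_QS
	rcu_user_hooks_switch(prev, next);
#endif
}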
@@ -2473,6 +2626,9 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
 	rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
 	WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
 	WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
+#ifdef CONFIG_RCU_USER_QS
+	WARN_ON_ONCE(rdp->dynticks->in_user);
+#endif
 	rdp->cpu = cpu;
 	rdp->rsp = rsp;
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);