@@ -54,17 +54,31 @@ void user_enter(void)
         WARN_ON_ONCE(!current->mm);
 
         local_irq_save(flags);
-        if (__this_cpu_read(context_tracking.active) &&
-            __this_cpu_read(context_tracking.state) != IN_USER) {
+        if (__this_cpu_read(context_tracking.state) != IN_USER) {
+                if (__this_cpu_read(context_tracking.active)) {
+                        /*
+                         * At this stage, only low level arch entry code remains and
+                         * then we'll run in userspace. We can assume there won't be
+                         * any RCU read-side critical section until the next call to
+                         * user_exit() or rcu_irq_enter(). Let's remove RCU's dependency
+                         * on the tick.
+                         */
+                        vtime_user_enter(current);
+                        rcu_user_enter();
+                }
                 /*
-                 * At this stage, only low level arch entry code remains and
-                 * then we'll run in userspace. We can assume there won't be
-                 * any RCU read-side critical section until the next call to
-                 * user_exit() or rcu_irq_enter(). Let's remove RCU's dependency
-                 * on the tick.
+                 * Even if context tracking is disabled on this CPU, because it's outside
+                 * the full dynticks mask for example, we still have to keep track of the
+                 * context transitions and states to prevent inconsistency on those of
+                 * other CPUs.
+                 * If a task triggers an exception in userspace, sleeps in the exception
+                 * handler and then migrates to another CPU, that new CPU must know where
+                 * the exception returns by the time we call exception_exit().
+                 * This information can only be provided by the previous CPU when it called
+                 * exception_enter().
+                 * OTOH we can spare the calls to vtime and RCU when context_tracking.active
+                 * is false because we know that CPU is not tickless.
                  */
-                vtime_user_enter(current);
-                rcu_user_enter();
                 __this_cpu_write(context_tracking.state, IN_USER);
         }
         local_irq_restore(flags);
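To see why the state write now happens outside the active check, here is a minimal userspace toy model of the patched user_enter()/user_exit() state machine. It is a sketch, not kernel code: the cpu[] array, the toy_* names and the printf() calls are illustrative stand-ins for the real per-CPU data and the vtime/RCU hooks.

/* Toy model (illustrative only) of the patched state machine,
 * for two pretend CPUs. */
#include <stdio.h>
#include <stdbool.h>

enum ctx_state { IN_KERNEL, IN_USER };

struct context_tracking {
        bool active;            /* does this CPU run the vtime/RCU hooks? */
        enum ctx_state state;   /* last recorded context on this CPU */
};

static struct context_tracking cpu[2];  /* stand-in for real per-CPU data */

static void toy_user_enter(int c)
{
        if (cpu[c].state != IN_USER) {
                if (cpu[c].active)
                        printf("CPU%d: vtime_user_enter + rcu_user_enter\n", c);
                /* Recorded even when inactive, so the tracking stays
                 * consistent if the task later migrates. */
                cpu[c].state = IN_USER;
        }
}

static void toy_user_exit(int c)
{
        if (cpu[c].state == IN_USER) {
                if (cpu[c].active)
                        printf("CPU%d: rcu_user_exit + vtime_user_exit\n", c);
                cpu[c].state = IN_KERNEL;
        }
}

int main(void)
{
        cpu[0].active = false;  /* e.g. outside the full dynticks mask */
        cpu[1].active = true;   /* a tickless CPU */

        toy_user_enter(0);      /* task returns to userspace on CPU0 */
        toy_user_exit(0);       /* exception entry: user_exit() on CPU0 */
        /* ... task sleeps in the handler, then migrates to CPU1 ... */
        toy_user_enter(1);      /* exception exit: user_enter() on CPU1 */
        return 0;
}

Without the patch, the inactive CPU0 would never record IN_USER, so code that later consults the recorded state, such as the exception return path described in the comment above, would work from stale information.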
@@ -130,12 +144,14 @@ void user_exit(void)
 
         local_irq_save(flags);
         if (__this_cpu_read(context_tracking.state) == IN_USER) {
-                /*
-                 * We are going to run code that may use RCU. Inform
-                 * RCU core about that (ie: we may need the tick again).
-                 */
-                rcu_user_exit();
-                vtime_user_exit(current);
+                if (__this_cpu_read(context_tracking.active)) {
+                        /*
+                         * We are going to run code that may use RCU. Inform
+                         * RCU core about that (ie: we may need the tick again).
+                         */
+                        rcu_user_exit();
+                        vtime_user_exit(current);
+                }
                 __this_cpu_write(context_tracking.state, IN_KERNEL);
         }
         local_irq_restore(flags);
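The comments in both hunks lean on exception_enter() and exception_exit(). As a rough sketch of how those wrappers drive the hooks patched here (a simplification I am assuming from include/linux/context_tracking.h of this era, not the verbatim kernel code):

/* Hypothetical simplification: exception paths bracket their handler
 * work with the same user_exit()/user_enter() pair patched above. */
void user_enter(void);  /* kernel/context_tracking.c */
void user_exit(void);

static inline void exception_enter(void)
{
        user_exit();    /* handler code may use RCU; tick may be needed */
}

static inline void exception_exit(void)
{
        user_enter();   /* back to userspace; drop RCU's tick dependency */
}

If the task sleeps in the handler and migrates, exception_exit() runs on a different CPU than exception_enter(), which is exactly why every CPU, active or not, must keep its recorded state up to date.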
@@ -178,8 +194,6 @@ EXPORT_SYMBOL_GPL(guest_exit);
 void context_tracking_task_switch(struct task_struct *prev,
                                   struct task_struct *next)
 {
-        if (__this_cpu_read(context_tracking.active)) {
-                clear_tsk_thread_flag(prev, TIF_NOHZ);
-                set_tsk_thread_flag(next, TIF_NOHZ);
-        }
+        clear_tsk_thread_flag(prev, TIF_NOHZ);
+        set_tsk_thread_flag(next, TIF_NOHZ);
 }
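With the state now maintained everywhere, the TIF_NOHZ handoff must be unconditional: a task switched in on a CPU where context_tracking.active is false still has to take the slow syscall path so its transitions stay recorded. A toy model of the handoff (illustrative only, with a simplified task struct standing in for task_struct and its thread flags):

/* Toy model (illustrative only) of the unconditional flag handoff. */
#include <stdbool.h>
#include <stdio.h>

struct task {
        const char *name;
        bool tif_nohz;   /* stand-in for the TIF_NOHZ thread flag */
};

static void toy_task_switch(struct task *prev, struct task *next)
{
        /* No context_tracking.active check any more: the flag always
         * follows the task that is about to run. */
        prev->tif_nohz = false;
        next->tif_nohz = true;
}

int main(void)
{
        struct task a = { "A", true };
        struct task b = { "B", false };

        toy_task_switch(&a, &b);
        printf("A: %d, B: %d\n", a.tif_nohz, b.tif_nohz);
        return 0;
}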