@@ -20,22 +20,33 @@
 #include <linux/hardirq.h>
 #include <linux/export.h>
 
-DEFINE_PER_CPU(struct context_tracking, context_tracking) = {
-#ifdef CONFIG_CONTEXT_TRACKING_FORCE
-	.active = true,
-#endif
-};
+#define CREATE_TRACE_POINTS
+#include <trace/events/context_tracking.h>
+
+struct static_key context_tracking_enabled = STATIC_KEY_INIT_FALSE;
+EXPORT_SYMBOL_GPL(context_tracking_enabled);
+
+DEFINE_PER_CPU(struct context_tracking, context_tracking);
+EXPORT_SYMBOL_GPL(context_tracking);
+
+void context_tracking_cpu_set(int cpu)
+{
+	if (!per_cpu(context_tracking.active, cpu)) {
+		per_cpu(context_tracking.active, cpu) = true;
+		static_key_slow_inc(&context_tracking_enabled);
+	}
+}
 
 /**
- * user_enter - Inform the context tracking that the CPU is going to
- *              enter userspace mode.
+ * context_tracking_user_enter - Inform the context tracking that the CPU is going to
+ *                               enter userspace mode.
  *
  * This function must be called right before we switch from the kernel
  * to userspace, when it's guaranteed the remaining kernel instructions
  * to execute won't use any RCU read side critical section because this
  * function sets RCU in extended quiescent state.
  */
-void user_enter(void)
+void context_tracking_user_enter(void)
 {
 	unsigned long flags;
 
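
Not shown in this hunk: callers keep using user_enter()/user_exit(). With context_tracking_enabled defined above, the companion header side is expected to gate those entry points behind the static key, roughly as in this sketch — the wrapper bodies below assume the usual static key pattern and are not lines from this patch:

	static inline void user_enter(void)
	{
		/* static_key_false() compiles to a no-op branch while the key is off */
		if (static_key_false(&context_tracking_enabled))
			context_tracking_user_enter();
	}

	static inline void user_exit(void)
	{
		if (static_key_false(&context_tracking_enabled))
			context_tracking_user_exit();
	}

Since context_tracking_cpu_set() bumps the key at most once per CPU, static_key_slow_inc() effectively counts tracked CPUs, and kernels that never enable tracking pay nothing on these fast paths.
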
@@ -54,17 +65,32 @@ void user_enter(void)
 	WARN_ON_ONCE(!current->mm);
 
 	local_irq_save(flags);
-	if (__this_cpu_read(context_tracking.active) &&
-	    __this_cpu_read(context_tracking.state) != IN_USER) {
+	if (__this_cpu_read(context_tracking.state) != IN_USER) {
+		if (__this_cpu_read(context_tracking.active)) {
+			trace_user_enter(0);
+			/*
+			 * At this stage, only low level arch entry code remains and
+			 * then we'll run in userspace. We can assume there won't be
+			 * any RCU read-side critical section until the next call to
+			 * user_exit() or rcu_irq_enter(). Let's remove RCU's dependency
+			 * on the tick.
+			 */
+			vtime_user_enter(current);
+			rcu_user_enter();
+		}
 		/*
-		 * At this stage, only low level arch entry code remains and
-		 * then we'll run in userspace. We can assume there won't be
-		 * any RCU read-side critical section until the next call to
-		 * user_exit() or rcu_irq_enter(). Let's remove RCU's dependency
-		 * on the tick.
+		 * Even if context tracking is disabled on this CPU, because it's outside
+		 * the full dynticks mask for example, we still have to keep track of the
+		 * context transitions and states to prevent inconsistency on those of
+		 * other CPUs.
+		 * If a task triggers an exception in userspace, sleep on the exception
+		 * handler and then migrate to another CPU, that new CPU must know where
+		 * the exception returns by the time we call exception_exit().
+		 * This information can only be provided by the previous CPU when it called
+		 * exception_enter().
+		 * OTOH we can spare the calls to vtime and RCU when context_tracking.active
+		 * is false because we know that CPU is not tickless.
 		 */
-		vtime_user_enter(current);
-		rcu_user_enter();
 		__this_cpu_write(context_tracking.state, IN_USER);
 	}
 	local_irq_restore(flags);
@@ -87,10 +113,9 @@ void user_enter(void)
  */
 void __sched notrace preempt_schedule_context(void)
 {
-	struct thread_info *ti = current_thread_info();
 	enum ctx_state prev_ctx;
 
-	if (likely(ti->preempt_count || irqs_disabled()))
+	if (likely(!preemptible()))
 		return;
 
 	/*
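
For reference (assumed from include/linux/preempt.h, not part of this patch), preemptible() expands to the same pair of checks the deleted line open-coded:

	#define preemptible()	(preempt_count() == 0 && !irqs_disabled())

so the rewrite drops the direct thread_info access without changing behavior.
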
@@ -112,8 +137,8 @@ EXPORT_SYMBOL_GPL(preempt_schedule_context);
 #endif /* CONFIG_PREEMPT */
 
 /**
- * user_exit - Inform the context tracking that the CPU is
- *             exiting userspace mode and entering the kernel.
+ * context_tracking_user_exit - Inform the context tracking that the CPU is
+ *                              exiting userspace mode and entering the kernel.
  *
  * This function must be called after we entered the kernel from userspace
  * before any use of RCU read side critical section. This potentially include
@@ -122,7 +147,7 @@ EXPORT_SYMBOL_GPL(preempt_schedule_context);
  * This call supports re-entrancy. This way it can be called from any exception
  * handler without needing to know if we came from userspace or not.
  */
-void user_exit(void)
+void context_tracking_user_exit(void)
 {
 	unsigned long flags;
 
@@ -131,38 +156,22 @@ void user_exit(void)
 
 	local_irq_save(flags);
 	if (__this_cpu_read(context_tracking.state) == IN_USER) {
-		/*
-		 * We are going to run code that may use RCU. Inform
-		 * RCU core about that (ie: we may need the tick again).
-		 */
-		rcu_user_exit();
-		vtime_user_exit(current);
+		if (__this_cpu_read(context_tracking.active)) {
+			/*
+			 * We are going to run code that may use RCU. Inform
+			 * RCU core about that (ie: we may need the tick again).
+			 */
+			rcu_user_exit();
+			vtime_user_exit(current);
+			trace_user_exit(0);
+		}
 		__this_cpu_write(context_tracking.state, IN_KERNEL);
 	}
 	local_irq_restore(flags);
 }
 
-void guest_enter(void)
-{
-	if (vtime_accounting_enabled())
-		vtime_guest_enter(current);
-	else
-		__guest_enter();
-}
-EXPORT_SYMBOL_GPL(guest_enter);
-
-void guest_exit(void)
-{
-	if (vtime_accounting_enabled())
-		vtime_guest_exit(current);
-	else
-		__guest_exit();
-}
-EXPORT_SYMBOL_GPL(guest_exit);
-
-
 /**
- * context_tracking_task_switch - context switch the syscall callbacks
+ * __context_tracking_task_switch - context switch the syscall callbacks
  * @prev: the task that is being switched out
  * @next: the task that is being switched in
  *
@@ -174,11 +183,19 @@ EXPORT_SYMBOL_GPL(guest_exit);
  * migrate to some CPU that doesn't do the context tracking. As such the TIF
  * flag may not be desired there.
  */
-void context_tracking_task_switch(struct task_struct *prev,
-				  struct task_struct *next)
+void __context_tracking_task_switch(struct task_struct *prev,
+				    struct task_struct *next)
 {
-	if (__this_cpu_read(context_tracking.active)) {
-		clear_tsk_thread_flag(prev, TIF_NOHZ);
-		set_tsk_thread_flag(next, TIF_NOHZ);
-	}
+	clear_tsk_thread_flag(prev, TIF_NOHZ);
+	set_tsk_thread_flag(next, TIF_NOHZ);
 }
+
+#ifdef CONFIG_CONTEXT_TRACKING_FORCE
+void __init context_tracking_init(void)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		context_tracking_cpu_set(cpu);
+}
+#endif
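
A minimal sketch of the exception_enter()/exception_exit() pairing that the new comment in context_tracking_user_enter() relies on; the handler and its body below are illustrative assumptions, not part of this patch:

	void some_exception_handler(void)	/* hypothetical call site */
	{
		enum ctx_state prev_state;

		prev_state = exception_enter();	/* leave user mode, remember where we came from */

		/*
		 * The handler may sleep and the task may wake up on another
		 * CPU. Because the IN_USER/IN_KERNEL transition above is
		 * recorded even when context_tracking.active is false,
		 * prev_state remains valid on the new CPU.
		 */
		do_handle_exception();		/* hypothetical work */

		exception_exit(prev_state);	/* re-enter user mode only if we came from it */
	}
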