/*
 * kernel/context_tracking.c
 *
 * Per-CPU tracking of user/kernel context transitions, used to drive
 * RCU extended quiescent states and vtime cputime accounting.
 */
  1. #include <linux/context_tracking.h>
  2. #include <linux/kvm_host.h>
  3. #include <linux/rcupdate.h>
  4. #include <linux/sched.h>
  5. #include <linux/hardirq.h>
  6. #include <linux/export.h>
/*
 * Per-CPU context tracking state. Each CPU records whether tracking is
 * active and whether it currently runs in user or kernel context.
 * With CONFIG_CONTEXT_TRACKING_FORCE, tracking is active on every CPU
 * from boot; otherwise .active stays false until enabled elsewhere.
 */
DEFINE_PER_CPU(struct context_tracking, context_tracking) = {
#ifdef CONFIG_CONTEXT_TRACKING_FORCE
	.active = true,
#endif
};
/*
 * user_enter - Inform the context tracking subsystem that the CPU is
 * about to resume userspace execution.
 *
 * Updates the per-CPU state to IN_USER, accounts the kernel->user
 * cputime transition and lets RCU enter an extended quiescent state
 * while the CPU runs in userspace. Must not be called from an IRQ.
 */
void user_enter(void)
{
	unsigned long flags;

	/*
	 * Some contexts may involve an exception occurring in an irq,
	 * leading to that nesting:
	 * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
	 * This would mess up the dyntick_nesting count though. And rcu_irq_*()
	 * helpers are enough to protect RCU uses inside the exception. So
	 * just return immediately if we detect we are in an IRQ.
	 */
	if (in_interrupt())
		return;

	/* Kernel threads have no mm and never return to userspace. */
	WARN_ON_ONCE(!current->mm);

	local_irq_save(flags);
	if (__this_cpu_read(context_tracking.active) &&
	    __this_cpu_read(context_tracking.state) != IN_USER) {
		/*
		 * Account the user-mode entry for vtime first, then tell
		 * RCU the CPU may stop being watched. Order is deliberate;
		 * it mirrors the reverse sequence in user_exit().
		 */
		vtime_user_enter(current);
		rcu_user_enter();
		__this_cpu_write(context_tracking.state, IN_USER);
	}
	local_irq_restore(flags);
}
/*
 * user_exit - Inform the context tracking subsystem that the CPU is
 * entering kernel mode from userspace.
 *
 * Mirror of user_enter(): brings RCU back out of its extended
 * quiescent state before anything else may use it, accounts the
 * user->kernel cputime transition, and flips the per-CPU state back
 * to IN_KERNEL. Must not be called from an IRQ.
 */
void user_exit(void)
{
	unsigned long flags;

	/*
	 * Some contexts may involve an exception occurring in an irq,
	 * leading to that nesting:
	 * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
	 * This would mess up the dyntick_nesting count though. And rcu_irq_*()
	 * helpers are enough to protect RCU uses inside the exception. So
	 * just return immediately if we detect we are in an IRQ.
	 */
	if (in_interrupt())
		return;

	local_irq_save(flags);
	if (__this_cpu_read(context_tracking.state) == IN_USER) {
		/*
		 * Exit RCU's quiescent state before the vtime accounting
		 * code runs — the reverse of the ordering in user_enter().
		 */
		rcu_user_exit();
		vtime_user_exit(current);
		__this_cpu_write(context_tracking.state, IN_KERNEL);
	}
	local_irq_restore(flags);
}
  56. void guest_enter(void)
  57. {
  58. if (vtime_accounting_enabled())
  59. vtime_guest_enter(current);
  60. else
  61. __guest_enter();
  62. }
  63. EXPORT_SYMBOL_GPL(guest_enter);
  64. void guest_exit(void)
  65. {
  66. if (vtime_accounting_enabled())
  67. vtime_guest_exit(current);
  68. else
  69. __guest_exit();
  70. }
  71. EXPORT_SYMBOL_GPL(guest_exit);
  72. void context_tracking_task_switch(struct task_struct *prev,
  73. struct task_struct *next)
  74. {
  75. if (__this_cpu_read(context_tracking.active)) {
  76. clear_tsk_thread_flag(prev, TIF_NOHZ);
  77. set_tsk_thread_flag(next, TIF_NOHZ);
  78. }
  79. }