/*
 * include/linux/context_tracking.h
 */
  1. #ifndef _LINUX_CONTEXT_TRACKING_H
  2. #define _LINUX_CONTEXT_TRACKING_H
  3. #include <linux/sched.h>
  4. #include <linux/percpu.h>
  5. #include <asm/ptrace.h>
  6. struct context_tracking {
  7. /*
  8. * When active is false, probes are unset in order
  9. * to minimize overhead: TIF flags are cleared
  10. * and calls to user_enter/exit are ignored. This
  11. * may be further optimized using static keys.
  12. */
  13. bool active;
  14. enum ctx_state {
  15. IN_KERNEL = 0,
  16. IN_USER,
  17. } state;
  18. };
  19. #ifdef CONFIG_CONTEXT_TRACKING
  20. DECLARE_PER_CPU(struct context_tracking, context_tracking);
  21. static inline bool context_tracking_in_user(void)
  22. {
  23. return __this_cpu_read(context_tracking.state) == IN_USER;
  24. }
  25. static inline bool context_tracking_active(void)
  26. {
  27. return __this_cpu_read(context_tracking.active);
  28. }
  29. extern void user_enter(void);
  30. extern void user_exit(void);
  31. static inline enum ctx_state exception_enter(void)
  32. {
  33. enum ctx_state prev_ctx;
  34. prev_ctx = this_cpu_read(context_tracking.state);
  35. user_exit();
  36. return prev_ctx;
  37. }
  38. static inline void exception_exit(enum ctx_state prev_ctx)
  39. {
  40. if (prev_ctx == IN_USER)
  41. user_enter();
  42. }
  43. extern void context_tracking_task_switch(struct task_struct *prev,
  44. struct task_struct *next);
  45. #else
  46. static inline bool context_tracking_in_user(void) { return false; }
  47. static inline void user_enter(void) { }
  48. static inline void user_exit(void) { }
  49. static inline enum ctx_state exception_enter(void) { return 0; }
  50. static inline void exception_exit(enum ctx_state prev_ctx) { }
  51. static inline void context_tracking_task_switch(struct task_struct *prev,
  52. struct task_struct *next) { }
  53. #endif /* !CONFIG_CONTEXT_TRACKING */
  54. #endif