/*
 * arch/x86 ftrace (function tracer) definitions.
 */
#ifndef _ASM_X86_FTRACE_H
#define _ASM_X86_FTRACE_H

#ifdef __ASSEMBLY__

	/*
	 * Save %rax plus the integer argument registers, and the call's
	 * return address, into a pt_regs-style frame on the stack.
	 *
	 * skip is set if the stack was already partially adjusted by the
	 * caller; the frame allocation is shrunk by that many bytes.
	 *
	 * NOTE(review): SS, RAX, RCX, ... RIP are presumably pt_regs
	 * field offsets generated elsewhere (asm-offsets) -- confirm.
	 */
	.macro MCOUNT_SAVE_FRAME skip=0
	 /*
	  * We add enough stack to save all regs.
	  */
	subq $(SS+8-\skip), %rsp
	movq %rax, RAX(%rsp)
	movq %rcx, RCX(%rsp)
	movq %rdx, RDX(%rsp)
	movq %rsi, RSI(%rsp)
	movq %rdi, RDI(%rsp)
	movq %r8, R8(%rsp)
	movq %r9, R9(%rsp)
	/*
	 * Move RIP to its proper location: the return address that sat at
	 * the top of the stack before the subq above is copied (via %rdx,
	 * which was already saved) into the frame's RIP slot.
	 */
	movq SS+8(%rsp), %rdx
	movq %rdx, RIP(%rsp)
	.endm
	/*
	 * Undo MCOUNT_SAVE_FRAME: reload the saved registers (in reverse
	 * order of the saves) and release the stack frame.
	 *
	 * skip must match the value that was passed to MCOUNT_SAVE_FRAME,
	 * so the addq exactly balances the earlier subq.
	 */
	.macro MCOUNT_RESTORE_FRAME skip=0
	movq R9(%rsp), %r9
	movq R8(%rsp), %r8
	movq RDI(%rsp), %rdi
	movq RSI(%rsp), %rsi
	movq RDX(%rsp), %rdx
	movq RCX(%rsp), %rcx
	movq RAX(%rsp), %rax
	addq $(SS+8-\skip), %rsp
	.endm

#endif /* __ASSEMBLY__ */
#ifdef CONFIG_FUNCTION_TRACER
/*
 * Address of the tracing hook the compiler emits at function entry:
 * __fentry__ when the compiler was detected to use it (CC_USING_FENTRY),
 * mcount otherwise.
 */
#ifdef CC_USING_FENTRY
# define MCOUNT_ADDR ((long)(__fentry__))
#else
# define MCOUNT_ADDR ((long)(mcount))
#endif
#define MCOUNT_INSN_SIZE 5 /* sizeof mcount call (opcode + rel32) */

#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * NOTE(review): advertises that this arch's ftrace trampoline can pass
 * ftrace_ops to callbacks -- confirm against the ftrace core's use.
 */
#define ARCH_SUPPORTS_FTRACE_OPS 1
#endif
#ifndef __ASSEMBLY__
/* Compiler-emitted function-entry hooks; see MCOUNT_ADDR above. */
extern void mcount(void);
/* Presumably nonzero while ftrace call sites are being patched -- TODO confirm */
extern atomic_t modifying_ftrace_code;
extern void __fentry__(void);
  46. static inline unsigned long ftrace_call_adjust(unsigned long addr)
  47. {
  48. /*
  49. * addr is the address of the mcount call instruction.
  50. * recordmcount does the necessary offset calculation.
  51. */
  52. return addr;
  53. }
#ifdef CONFIG_DYNAMIC_FTRACE

/* Per-mcount-record architecture-private data; x86 needs none. */
struct dyn_arch_ftrace {
	/* No extra data needed for x86 */
};

/*
 * NOTE(review): presumably the int3 breakpoint handler used while call
 * sites are live-patched -- confirm against the arch ftrace.c.
 */
int ftrace_int3_handler(struct pt_regs *regs);

#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_FUNCTION_TRACER */
#if !defined(__ASSEMBLY__) && !defined(COMPILE_OFFSETS)

#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_IA32_EMULATION)
#include <asm/compat.h>

/*
 * Because ia32 syscalls do not map to x86_64 syscall numbers
 * this screws up the trace output when tracing a ia32 task.
 * Instead of reporting bogus syscalls, just do not trace them.
 *
 * If the user really wants these, then they should use the
 * raw syscall tracepoints with filtering.
 */
#define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS 1
  74. static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
  75. {
  76. if (is_compat_task())
  77. return true;
  78. return false;
  79. }
  80. #endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_IA32_EMULATION */
  81. #endif /* !__ASSEMBLY__ && !COMPILE_OFFSETS */
  82. #endif /* _ASM_X86_FTRACE_H */