ptrace.h

#ifndef ASM_X86__PTRACE_H
#define ASM_X86__PTRACE_H

#include <linux/compiler.h>	/* For __user */
#include <asm/ptrace-abi.h>
#include <asm/processor-flags.h>

#ifdef __KERNEL__
#include <asm/ds.h>		/* the DS BTS struct is used for ptrace too */
#include <asm/segment.h>
#endif

#ifndef __ASSEMBLY__

#ifdef __i386__
/* this struct defines the way the registers are stored on the
   stack during a system call. */

#ifndef __KERNEL__

struct pt_regs {
	long ebx;
	long ecx;
	long edx;
	long esi;
	long edi;
	long ebp;
	long eax;
	int  xds;
	int  xes;
	int  xfs;
	/* int  gs; */
	long orig_eax;
	long eip;
	int  xcs;
	long eflags;
	long esp;
	int  xss;
};
#else /* __KERNEL__ */

struct pt_regs {
	unsigned long bx;
	unsigned long cx;
	unsigned long dx;
	unsigned long si;
	unsigned long di;
	unsigned long bp;
	unsigned long ax;
	unsigned long ds;
	unsigned long es;
	unsigned long fs;
	/* int  gs; */
	unsigned long orig_ax;
	unsigned long ip;
	unsigned long cs;
	unsigned long flags;
	unsigned long sp;
	unsigned long ss;
};

#endif /* __KERNEL__ */
#else /* __i386__ */

#ifndef __KERNEL__

struct pt_regs {
	unsigned long r15;
	unsigned long r14;
	unsigned long r13;
	unsigned long r12;
	unsigned long rbp;
	unsigned long rbx;
/* arguments: non-interrupt/non-tracing syscalls only save up to here */
	unsigned long r11;
	unsigned long r10;
	unsigned long r9;
	unsigned long r8;
	unsigned long rax;
	unsigned long rcx;
	unsigned long rdx;
	unsigned long rsi;
	unsigned long rdi;
	unsigned long orig_rax;
/* end of arguments */
/* cpu exception frame or undefined */
	unsigned long rip;
	unsigned long cs;
	unsigned long eflags;
	unsigned long rsp;
	unsigned long ss;
/* top of stack page */
};
#else /* __KERNEL__ */

struct pt_regs {
	unsigned long r15;
	unsigned long r14;
	unsigned long r13;
	unsigned long r12;
	unsigned long bp;
	unsigned long bx;
/* arguments: non-interrupt/non-tracing syscalls only save up to here */
	unsigned long r11;
	unsigned long r10;
	unsigned long r9;
	unsigned long r8;
	unsigned long ax;
	unsigned long cx;
	unsigned long dx;
	unsigned long si;
	unsigned long di;
	unsigned long orig_ax;
/* end of arguments */
/* cpu exception frame or undefined */
	unsigned long ip;
	unsigned long cs;
	unsigned long flags;
	unsigned long sp;
	unsigned long ss;
/* top of stack page */
};

#endif /* __KERNEL__ */
#endif /* !__i386__ */
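
/*
 * Illustrative sketch, not part of this header: how a 32-bit tracer
 * might read the user-visible register layout above from user space.
 * It assumes a tracee already stopped under ptrace, identified by a
 * hypothetical pid variable `child`; user space normally goes through
 * PTRACE_GETREGS and struct user_regs_struct from <sys/user.h>, whose
 * i386 layout matches the non-__KERNEL__ pt_regs above.
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/types.h>
 *	#include <sys/user.h>
 *	#include <stdio.h>
 *
 *	static void dump_regs(pid_t child)
 *	{
 *		struct user_regs_struct regs;
 *
 *		if (ptrace(PTRACE_GETREGS, child, NULL, &regs) == -1)
 *			return;
 *		printf("eip=%lx esp=%lx orig_eax=%lx\n",
 *		       (unsigned long)regs.eip, (unsigned long)regs.esp,
 *		       (unsigned long)regs.orig_eax);
 *	}
 */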
#ifdef CONFIG_X86_PTRACE_BTS

/*
 * A branch trace record entry.
 *
 * In order to unify the interface between various processor versions,
 * we use the data structure below for all processors.
 */
enum bts_qualifier {
	BTS_INVALID = 0,
	BTS_BRANCH,
	BTS_TASK_ARRIVES,
	BTS_TASK_DEPARTS
};

struct bts_struct {
	__u64 qualifier;
	union {
		/* BTS_BRANCH */
		struct {
			__u64 from_ip;
			__u64 to_ip;
		} lbr;
		/* BTS_TASK_ARRIVES or BTS_TASK_DEPARTS */
		__u64 jiffies;
	} variant;
};

#endif /* CONFIG_X86_PTRACE_BTS */
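
/*
 * Illustrative sketch, not part of this header: decoding a single BTS
 * record as defined above.  The helper name and the use of pr_debug()
 * are assumptions made for the example, not existing kernel code.
 *
 *	static void print_bts_record(const struct bts_struct *bts)
 *	{
 *		switch (bts->qualifier) {
 *		case BTS_BRANCH:
 *			pr_debug("branch %llx -> %llx\n",
 *				 (unsigned long long)bts->variant.lbr.from_ip,
 *				 (unsigned long long)bts->variant.lbr.to_ip);
 *			break;
 *		case BTS_TASK_ARRIVES:
 *		case BTS_TASK_DEPARTS:
 *			pr_debug("sched event at jiffies %llu\n",
 *				 (unsigned long long)bts->variant.jiffies);
 *			break;
 *		default:
 *			break;
 *		}
 *	}
 */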
#ifdef __KERNEL__

#include <linux/init.h>

struct cpuinfo_x86;
struct task_struct;

#ifdef CONFIG_X86_PTRACE_BTS
extern void __cpuinit ptrace_bts_init_intel(struct cpuinfo_x86 *);
extern void ptrace_bts_take_timestamp(struct task_struct *, enum bts_qualifier);
#else
#define ptrace_bts_init_intel(config) do {} while (0)
#endif /* CONFIG_X86_PTRACE_BTS */

extern unsigned long profile_pc(struct pt_regs *regs);

extern unsigned long
convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs);

#ifdef CONFIG_X86_32
extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
			 int error_code);
#else
void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
#endif

extern long syscall_trace_enter(struct pt_regs *);
extern void syscall_trace_leave(struct pt_regs *);

static inline unsigned long regs_return_value(struct pt_regs *regs)
{
	return regs->ax;
}
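
/*
 * Illustrative sketch, not part of this header: the same return value
 * can be read from user space at a syscall-exit stop with
 * PTRACE_PEEKUSER, e.g. on 32-bit (the pid variable `child` is
 * hypothetical):
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/reg.h>	// EAX, ORIG_EAX word offsets
 *
 *	long ret = ptrace(PTRACE_PEEKUSER, child, 4 * EAX, NULL);
 *	long nr  = ptrace(PTRACE_PEEKUSER, child, 4 * ORIG_EAX, NULL);
 */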
/*
 * user_mode_vm(regs) determines whether a register set came from user
 * mode.  This is true if V8086 mode was enabled OR if the register set
 * was from protected mode with an RPL-3 CS value.  This tricky test
 * checks that with one comparison.  Many places in the kernel can
 * bypass this full check if they have already ruled out V8086 mode, so
 * user_mode(regs) can be used instead.
 */
static inline int user_mode(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
#else
	return !!(regs->cs & 3);
#endif
}

static inline int user_mode_vm(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
		USER_RPL;
#else
	return user_mode(regs);
#endif
}

static inline int v8086_mode(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	return (regs->flags & X86_VM_MASK);
#else
	return 0;	/* No V86 mode support in long mode */
#endif
}
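
/*
 * Worked example for the one-comparison trick above (informal, using
 * the usual 32-bit values: SEGMENT_RPL_MASK == 3, USER_RPL == 3, and
 * X86_VM_MASK == the EFLAGS VM bit, 1 << 17):
 *
 *   kernel mode, no VM86:  (0 | 0)       == 0       <  USER_RPL
 *   user protected mode:   (3 | 0)       == 3       >= USER_RPL
 *   VM86 mode:             (x | 0x20000) >= 0x20000 >= USER_RPL
 *
 * so a single >= comparison covers both the RPL and the VM86 cases.
 */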
/*
 * X86_32 CPUs don't save ss and esp if the CPU is already in kernel
 * mode when it traps.  So regs will be the current sp.
 *
 * This is valid only for kernel-mode traps.
 */
static inline unsigned long kernel_trap_sp(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	return (unsigned long)regs;
#else
	return regs->sp;
#endif
}

static inline unsigned long instruction_pointer(struct pt_regs *regs)
{
	return regs->ip;
}

static inline unsigned long frame_pointer(struct pt_regs *regs)
{
	return regs->bp;
}

static inline unsigned long user_stack_pointer(struct pt_regs *regs)
{
	return regs->sp;
}
/*
 * These are defined as per linux/ptrace.h; see that header for the
 * generic interface they back.
 */
#define arch_has_single_step()	(1)
extern void user_enable_single_step(struct task_struct *);
extern void user_disable_single_step(struct task_struct *);

extern void user_enable_block_step(struct task_struct *);
#ifdef CONFIG_X86_DEBUGCTLMSR
#define arch_has_block_step()	(1)
#else
#define arch_has_block_step()	(boot_cpu_data.x86 >= 6)
#endif
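
/*
 * Illustrative sketch, not part of this header: from user space, the
 * single-step hook above is reached through the generic
 * PTRACE_SINGLESTEP request, e.g. for a stopped tracee `child`
 * (the pid and status variables are hypothetical):
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *
 *	ptrace(PTRACE_SINGLESTEP, child, NULL, NULL);
 *	waitpid(child, &status, 0);	// tracee stops again after one insn
 */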
struct user_desc;
extern int do_get_thread_area(struct task_struct *p, int idx,
			      struct user_desc __user *info);
extern int do_set_thread_area(struct task_struct *p, int idx,
			      struct user_desc __user *info, int can_allocate);

#define __ARCH_WANT_COMPAT_SYS_PTRACE

#endif /* __KERNEL__ */
#endif /* !__ASSEMBLY__ */
#endif /* ASM_X86__PTRACE_H */