ptrace.h
#ifndef _ASM_X86_PTRACE_H
#define _ASM_X86_PTRACE_H

#include <linux/compiler.h>	/* For __user */
#include <asm/ptrace-abi.h>

#ifndef __ASSEMBLY__

#ifdef __KERNEL__
#include <asm/ds.h>

struct task_struct;
extern void ptrace_bts_take_timestamp(struct task_struct *, enum bts_qualifier);
#endif /* __KERNEL__ */

#ifdef __i386__
/* this struct defines the way the registers are stored on the
   stack during a system call. */

#ifndef __KERNEL__

struct pt_regs {
	long ebx;
	long ecx;
	long edx;
	long esi;
	long edi;
	long ebp;
	long eax;
	int xds;
	int xes;
	int xfs;
	/* int gs; */
	long orig_eax;
	long eip;
	int xcs;
	long eflags;
	long esp;
	int xss;
};

#else /* __KERNEL__ */

struct pt_regs {
	long bx;
	long cx;
	long dx;
	long si;
	long di;
	long bp;
	long ax;
	int ds;
	int es;
	int fs;
	/* int gs; */
	long orig_ax;
	long ip;
	int cs;
	long flags;
	long sp;
	int ss;
};

#include <asm/vm86.h>
#include <asm/segment.h>

struct task_struct;
extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code);

/*
 * user_mode_vm(regs) determines whether a register set came from user mode.
 * This is true if V8086 mode was enabled OR if the register set was from
 * protected mode with RPL-3 CS value.  This tricky test checks that with
 * one comparison.  Many places in the kernel can bypass this full check
 * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
 */
static inline int user_mode(struct pt_regs *regs)
{
	return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
}

static inline int user_mode_vm(struct pt_regs *regs)
{
	return ((regs->cs & SEGMENT_RPL_MASK) |
		(regs->flags & VM_MASK)) >= USER_RPL;
}

static inline int v8086_mode(struct pt_regs *regs)
{
	return (regs->flags & VM_MASK);
}
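/*
 * Worked example (added for clarity; assumes the usual values
 * VM_MASK == 0x00020000 from <asm/vm86.h> and SEGMENT_RPL_MASK == 3,
 * USER_RPL == 3 from <asm/segment.h>):
 *
 *   - vm86 mode:  flags & VM_MASK == 0x20000, so the OR is >= 3  -> user
 *   - user CS:    cs & SEGMENT_RPL_MASK == 3, so the OR is >= 3  -> user
 *   - kernel CS:  RPL is 0 and VM is clear, so the OR is 0 < 3   -> kernel
 *
 * This is how the single comparison in user_mode_vm() above covers both
 * the vm86 case and the RPL-3 case.
 */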
#define instruction_pointer(regs) ((regs)->ip)
#define frame_pointer(regs) ((regs)->bp)
#define stack_pointer(regs) ((unsigned long)(regs))
#define regs_return_value(regs) ((regs)->ax)

extern unsigned long profile_pc(struct pt_regs *regs);
#endif /* __KERNEL__ */
#else /* __i386__ */

#ifndef __KERNEL__

struct pt_regs {
	unsigned long r15;
	unsigned long r14;
	unsigned long r13;
	unsigned long r12;
	unsigned long rbp;
	unsigned long rbx;
	/* arguments: non interrupts/non tracing syscalls only save up to here */
	unsigned long r11;
	unsigned long r10;
	unsigned long r9;
	unsigned long r8;
	unsigned long rax;
	unsigned long rcx;
	unsigned long rdx;
	unsigned long rsi;
	unsigned long rdi;
	unsigned long orig_rax;
	/* end of arguments */
	/* cpu exception frame or undefined */
	unsigned long rip;
	unsigned long cs;
	unsigned long eflags;
	unsigned long rsp;
	unsigned long ss;
	/* top of stack page */
};
#else /* __KERNEL__ */

struct pt_regs {
	unsigned long r15;
	unsigned long r14;
	unsigned long r13;
	unsigned long r12;
	unsigned long bp;
	unsigned long bx;
	/* arguments: non interrupts/non tracing syscalls only save up to here */
	unsigned long r11;
	unsigned long r10;
	unsigned long r9;
	unsigned long r8;
	unsigned long ax;
	unsigned long cx;
	unsigned long dx;
	unsigned long si;
	unsigned long di;
	unsigned long orig_ax;
	/* end of arguments */
	/* cpu exception frame or undefined */
	unsigned long ip;
	unsigned long cs;
	unsigned long flags;
	unsigned long sp;
	unsigned long ss;
	/* top of stack page */
};
#define user_mode(regs) (!!((regs)->cs & 3))
#define user_mode_vm(regs) user_mode(regs)
#define v8086_mode(regs) 0	/* No V86 mode support in long mode */
#define instruction_pointer(regs) ((regs)->ip)
#define frame_pointer(regs) ((regs)->bp)
#define stack_pointer(regs) ((regs)->sp)
#define regs_return_value(regs) ((regs)->ax)

extern unsigned long profile_pc(struct pt_regs *regs);

void signal_fault(struct pt_regs *regs, void __user *frame, char *where);

struct task_struct;

extern unsigned long
convert_rip_to_linear(struct task_struct *child, struct pt_regs *regs);

#endif /* __KERNEL__ */

#endif /* !__i386__ */
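/*
 * Illustrative sketch (not part of the original header): both variants
 * above define the same accessor macros, so arch-independent code can look
 * at a register image without knowing which layout it carries.  report_trap()
 * is a hypothetical helper shown only to demonstrate the intended use:
 *
 *	static void report_trap(struct pt_regs *regs)
 *	{
 *		if (user_mode_vm(regs))
 *			printk(KERN_DEBUG "trap at user ip %lx\n",
 *			       instruction_pointer(regs));
 *		else
 *			printk(KERN_DEBUG "trap at kernel ip %lx\n",
 *			       instruction_pointer(regs));
 *	}
 */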
#ifdef __KERNEL__

/*
 * These are defined as per linux/ptrace.h, which see.
 */
#define arch_has_single_step()	(1)
extern void user_enable_single_step(struct task_struct *);
extern void user_disable_single_step(struct task_struct *);

extern void user_enable_block_step(struct task_struct *);
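/*
 * Note (added, hedged): user_enable_block_step() presumably relies on the
 * BTF bit in the DEBUGCTL MSR.  With CONFIG_X86_DEBUGCTLMSR unset,
 * arch_has_block_step() below falls back to a runtime check for a family 6
 * or newer CPU, where that MSR is expected to be available.
 */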
#ifdef CONFIG_X86_DEBUGCTLMSR
#define arch_has_block_step()	(1)
#else
#define arch_has_block_step()	(boot_cpu_data.x86 >= 6)
#endif
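/*
 * Note (added, hedged): do_get_thread_area()/do_set_thread_area() appear to
 * back both the get_thread_area/set_thread_area system calls and the ptrace
 * PTRACE_GET_THREAD_AREA/PTRACE_SET_THREAD_AREA requests, operating on TLS
 * descriptors described by struct user_desc.
 */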
struct user_desc;
extern int do_get_thread_area(struct task_struct *p, int idx,
			      struct user_desc __user *info);
extern int do_set_thread_area(struct task_struct *p, int idx,
			      struct user_desc __user *info, int can_allocate);

#endif /* __KERNEL__ */

#endif /* !__ASSEMBLY__ */

#endif
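
The non-__KERNEL__ pt_regs layouts above are the register images a debugger sees through ptrace(2). The following is a minimal userspace sketch, not part of the header, added only to illustrate that relationship; it assumes glibc's ptrace() wrapper and struct user_regs_struct from <sys/user.h>, whose field names follow the userspace convention shown above (eip/esp on 32-bit, rip/rsp on 64-bit).

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/user.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t child = fork();

	if (child == 0) {
		/* Child: request tracing, then exec a trivial target. */
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		execl("/bin/true", "true", (char *)NULL);
		_exit(1);
	}

	/* Parent: the child stops at the exec; fetch its registers. */
	waitpid(child, NULL, 0);

	struct user_regs_struct regs;
	ptrace(PTRACE_GETREGS, child, NULL, &regs);
#ifdef __x86_64__
	printf("child ip=%llx sp=%llx\n",
	       (unsigned long long)regs.rip, (unsigned long long)regs.rsp);
#else
	printf("child ip=%lx sp=%lx\n",
	       (unsigned long)regs.eip, (unsigned long)regs.esp);
#endif

	ptrace(PTRACE_CONT, child, NULL, NULL);
	waitpid(child, NULL, 0);
	return 0;
}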