step.c

/*
 * x86 single-step support code, common to 32-bit and 64-bit.
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/ptrace.h>
#include <asm/desc.h>
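
/*
 * Turn the CS:IP in the child's saved registers into a linear address,
 * accounting for vm86 mode and for LDT-based code segments.  Returns
 * -1L for a bogus LDT selector (any access through it would fault).
 */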
unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs)
{
	unsigned long addr, seg;

	addr = regs->ip;
	seg = regs->cs & 0xffff;
	if (v8086_mode(regs)) {
		addr = (addr & 0xffff) + (seg << 4);
		return addr;
	}

	/*
	 * We'll assume that the code segments in the GDT
	 * are all zero-based. That is largely true: the
	 * TLS segments are used for data, and the PNPBIOS
	 * and APM bios ones we just ignore here.
	 */
	if ((seg & SEGMENT_TI_MASK) == SEGMENT_LDT) {
		struct desc_struct *desc;
		unsigned long base;

		seg &= ~7UL;

		mutex_lock(&child->mm->context.lock);
		if (unlikely((seg >> 3) >= child->mm->context.size))
			addr = -1L; /* bogus selector, access would fault */
		else {
			desc = child->mm->context.ldt + seg;
			base = get_desc_base(desc);

			/* 16-bit code segment? */
			if (!desc->d)
				addr &= 0xffff;
			addr += base;
		}
		mutex_unlock(&child->mm->context.lock);
	}

	return addr;
}
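
/*
 * Inspect the instruction the child is about to execute, skipping over
 * prefix bytes: popf and iret can change EFLAGS.TF themselves, in which
 * case TF must not be accounted to the kernel via TIF_FORCED_TF.
 */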
static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
{
	int i, copied;
	unsigned char opcode[15];
	unsigned long addr = convert_ip_to_linear(child, regs);

	copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
	for (i = 0; i < copied; i++) {
		switch (opcode[i]) {
		/* popf and iret */
		case 0x9d: case 0xcf:
			return 1;

			/* CHECKME: 64 65 */

		/* opcode and address size prefixes */
		case 0x66: case 0x67:
			continue;
		/* irrelevant prefixes (segment overrides and repeats) */
		case 0x26: case 0x2e:
		case 0x36: case 0x3e:
		case 0x64: case 0x65:
		case 0xf0: case 0xf2: case 0xf3:
			continue;

#ifdef CONFIG_X86_64
		case 0x40 ... 0x4f:
			if (regs->cs != __USER_CS)
				/* 32-bit mode: register increment */
				return 0;
			/* 64-bit mode: REX prefix */
			continue;
#endif

			/* CHECKME: f2, f3 */

		/*
		 * pushf: NOTE! We should probably not let
		 * the user see the TF bit being set. But
		 * it's more pain than it's worth to avoid
		 * it, and a debugger could emulate this
		 * all in user space if it _really_ cares.
		 */
		case 0x9c:
		default:
			return 0;
		}
	}
	return 0;
}

/*
 * Enable single-stepping. Return nonzero if user mode is not using TF itself.
 */
static int enable_single_step(struct task_struct *child)
{
	struct pt_regs *regs = task_pt_regs(child);
	unsigned long oflags;

	/*
	 * If we stepped into a sysenter/syscall insn, it trapped in
	 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
	 * If user-mode had set TF itself, then it's still clear from
	 * do_debug() and we need to set it again to restore the user
	 * state so we don't wrongly set TIF_FORCED_TF below.
	 * If enable_single_step() was used last and that is what
	 * set TIF_SINGLESTEP, then both TF and TIF_FORCED_TF are
	 * already set and our bookkeeping is fine.
	 */
	if (unlikely(test_tsk_thread_flag(child, TIF_SINGLESTEP)))
		regs->flags |= X86_EFLAGS_TF;

	/*
	 * Always set TIF_SINGLESTEP - this guarantees that
	 * we single-step system calls etc.. This will also
	 * cause us to set TF when returning to user mode.
	 */
	set_tsk_thread_flag(child, TIF_SINGLESTEP);

	oflags = regs->flags;

	/* Set TF on the kernel stack.. */
	regs->flags |= X86_EFLAGS_TF;

	/*
	 * ..but if TF is changed by the instruction we will trace,
	 * don't mark it as being "us" that set it, so that we
	 * won't clear it by hand later.
	 *
	 * Note that if we don't actually execute the popf because
	 * of a signal arriving right now or suchlike, we will lose
	 * track of the fact that it really was "us" that set it.
	 */
	if (is_setting_trap_flag(child, regs)) {
		clear_tsk_thread_flag(child, TIF_FORCED_TF);
		return 0;
	}

	/*
	 * If TF was already set, check whether it was us who set it.
	 * If not, we should never attempt a block step.
	 */
	if (oflags & X86_EFLAGS_TF)
		return test_tsk_thread_flag(child, TIF_FORCED_TF);

	set_tsk_thread_flag(child, TIF_FORCED_TF);

	return 1;
}

/*
 * Install this value in MSR_IA32_DEBUGCTLMSR whenever child is running.
 */
static void write_debugctlmsr(struct task_struct *child, unsigned long val)
{
	if (child->thread.debugctlmsr == val)
		return;

	child->thread.debugctlmsr = val;

	if (child != current)
		return;

	update_debugctlmsr(val);
}
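
/*
 * "Block step" relies on the BTF bit in MSR_IA32_DEBUGCTLMSR: with BTF
 * set alongside EFLAGS.TF, the trap flag causes a debug exception only
 * on branches (and similar control transfers) rather than on every
 * instruction.
 */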

/*
 * Enable single or block step.
 */
static void enable_step(struct task_struct *child, bool block)
{
	/*
	 * Make sure block stepping (BTF) is not enabled unless it should be.
	 * Note that we don't try to worry about any is_setting_trap_flag()
	 * instructions after the first when using block stepping.
	 * So no one should try to use debugger block stepping in a program
	 * that uses user-mode single stepping itself.
	 */
	if (enable_single_step(child) && block) {
		set_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
		write_debugctlmsr(child,
				  child->thread.debugctlmsr | DEBUGCTLMSR_BTF);
	} else {
		write_debugctlmsr(child,
				  child->thread.debugctlmsr & ~DEBUGCTLMSR_BTF);

		if (!child->thread.debugctlmsr)
			clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
	}
}
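
/*
 * Entry points used by the generic ptrace code: ptrace_resume() calls
 * these to implement PTRACE_SINGLESTEP and PTRACE_SINGLEBLOCK, and to
 * clear any stepping state when the tracee is resumed normally.
 */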
void user_enable_single_step(struct task_struct *child)
{
	enable_step(child, 0);
}

void user_enable_block_step(struct task_struct *child)
{
	enable_step(child, 1);
}

void user_disable_single_step(struct task_struct *child)
{
	/*
	 * Make sure block stepping (BTF) is disabled.
	 */
	write_debugctlmsr(child,
			  child->thread.debugctlmsr & ~DEBUGCTLMSR_BTF);
	if (!child->thread.debugctlmsr)
		clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR);

	/* Always clear TIF_SINGLESTEP... */
	clear_tsk_thread_flag(child, TIF_SINGLESTEP);

	/* But touch TF only if it was set by us.. */
	if (test_and_clear_tsk_thread_flag(child, TIF_FORCED_TF))
		task_pt_regs(child)->flags &= ~X86_EFLAGS_TF;
}
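
/*
 * For reference, a minimal and hypothetical (untested) user-space sketch
 * of the path that exercises this file: a tracer issuing PTRACE_SINGLESTEP,
 * which reaches user_enable_single_step() through the generic ptrace code.
 * Error handling is omitted for brevity; this is not part of the kernel
 * build.
 */
#if 0	/* illustration only */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t child = fork();

	if (child == 0) {
		/* Tracee: ask to be traced, then exec something to step. */
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		execl("/bin/true", "true", (char *)NULL);
		return 1;
	}

	/* Tracer: step the child one instruction at a time. */
	for (;;) {
		int status;

		waitpid(child, &status, 0);
		if (WIFEXITED(status))
			break;
		/* Each PTRACE_SINGLESTEP ends up in user_enable_single_step(). */
		ptrace(PTRACE_SINGLESTEP, child, NULL, NULL);
	}
	return 0;
}
#endif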