ptrace_64.c

/* By Ross Biro 1/23/92 */
/*
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * x86-64 port 2000-2002 Andi Kleen
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/prctl.h>
#include <asm/i387.h>
#include <asm/debugreg.h>
#include <asm/ldt.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>

/*
 * does not yet catch signals sent when the child dies.
 * in exit.c or in signal.c.
 */

/*
 * Determines which flags the user has access to [1 = access, 0 = no access].
 * Prohibits changing ID(21), VIP(20), VIF(19), VM(17), IOPL(12-13), IF(9).
 * Also masks reserved bits (63-22, 15, 5, 3, 1).
 */
#define FLAG_MASK 0x54dd5UL

/*
 * eflags and offset of eflags on child stack..
 */
#define EFLAGS offsetof(struct pt_regs, eflags)
#define EFL_OFFSET ((int)(EFLAGS-sizeof(struct pt_regs)))
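
/*
 * EFL_OFFSET is negative: thread.rsp0 points just above the pt_regs frame
 * saved at the top of the child's kernel stack, so adding EFL_OFFSET to
 * rsp0 lands on the saved eflags word.
 */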

/*
 * this routine will get a word off of the process's privileged stack.
 * the offset is how far from the base addr as stored in the TSS.
 * this routine assumes that all the privileged stacks are in our
 * data space.
 */
static inline unsigned long get_stack_long(struct task_struct *task, int offset)
{
        unsigned char *stack;

        stack = (unsigned char *)task->thread.rsp0;
        stack += offset;
        return (*((unsigned long *)stack));
}

/*
 * this routine will put a word on the process's privileged stack.
 * the offset is how far from the base addr as stored in the TSS.
 * this routine assumes that all the privileged stacks are in our
 * data space.
 */
static inline long put_stack_long(struct task_struct *task, int offset,
                                  unsigned long data)
{
        unsigned char *stack;

        stack = (unsigned char *)task->thread.rsp0;
        stack += offset;
        *(unsigned long *)stack = data;
        return 0;
}

#define LDT_SEGMENT 4

unsigned long convert_rip_to_linear(struct task_struct *child, struct pt_regs *regs)
{
        unsigned long addr, seg;

        addr = regs->rip;
        seg = regs->cs & 0xffff;

        /*
         * We'll assume that the code segments in the GDT
         * are all zero-based. That is largely true: the
         * TLS segments are used for data, and the PNPBIOS
         * and APM bios ones we just ignore here.
         */
        if (seg & LDT_SEGMENT) {
                u32 *desc;
                unsigned long base;

                seg &= ~7UL;

                mutex_lock(&child->mm->context.lock);
                if (unlikely((seg >> 3) >= child->mm->context.size))
                        addr = -1L; /* bogus selector, access would fault */
                else {
                        desc = child->mm->context.ldt + seg;
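                        /*
                         * Assemble the 32-bit segment base from the fields
                         * scattered across the two descriptor words: base
                         * 0-15 in desc[0] bits 16-31, base 16-23 in desc[1]
                         * bits 0-7, base 24-31 in desc[1] bits 24-31.
                         */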
                        base = ((desc[0] >> 16) |
                                ((desc[1] & 0xff) << 16) |
                                (desc[1] & 0xff000000));

                        /* 16-bit code segment? */
                        if (!((desc[1] >> 22) & 1))
                                addr &= 0xffff;
                        addr += base;
                }
                mutex_unlock(&child->mm->context.lock);
        }

        return addr;
}
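
/*
 * Look at the instruction bytes the child is about to execute and decide
 * whether the instruction itself loads EFLAGS (popf or iret, possibly behind
 * prefixes). In that case the instruction, not the tracer, will end up
 * changing TF, so set_singlestep() below must not claim ownership of the flag.
 */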
static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
{
        int i, copied;
        unsigned char opcode[15];
        unsigned long addr = convert_rip_to_linear(child, regs);

        copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
        for (i = 0; i < copied; i++) {
                switch (opcode[i]) {
                /* popf and iret */
                case 0x9d: case 0xcf:
                        return 1;

                /* CHECKME: 64 65 */

                /* opcode and address size prefixes */
                case 0x66: case 0x67:
                        continue;
                /* irrelevant prefixes (segment overrides and repeats) */
                case 0x26: case 0x2e:
                case 0x36: case 0x3e:
                case 0x64: case 0x65:
                case 0xf2: case 0xf3:
                        continue;

                case 0x40 ... 0x4f:
                        if (regs->cs != __USER_CS)
                                /* 32-bit mode: register increment */
                                return 0;
                        /* 64-bit mode: REX prefix */
                        continue;

                /* CHECKME: f2, f3 */

                /*
                 * pushf: NOTE! We should probably not let
                 * the user see the TF bit being set. But
                 * it's more pain than it's worth to avoid
                 * it, and a debugger could emulate this
                 * all in user space if it _really_ cares.
                 */
                case 0x9c:
                default:
                        return 0;
                }
        }
        return 0;
}

static void set_singlestep(struct task_struct *child)
{
        struct pt_regs *regs = task_pt_regs(child);

        /*
         * Always set TIF_SINGLESTEP - this guarantees that
         * we single-step system calls etc.. This will also
         * cause us to set TF when returning to user mode.
         */
        set_tsk_thread_flag(child, TIF_SINGLESTEP);

        /*
         * If TF was already set, don't do anything else
         */
        if (regs->eflags & X86_EFLAGS_TF)
                return;

        /* Set TF on the kernel stack.. */
        regs->eflags |= X86_EFLAGS_TF;

        /*
         * ..but if TF is changed by the instruction we will trace,
         * don't mark it as being "us" that set it, so that we
         * won't clear it by hand later.
         */
        if (is_setting_trap_flag(child, regs))
                return;

        child->ptrace |= PT_DTRACE;
}

static void clear_singlestep(struct task_struct *child)
{
        /* Always clear TIF_SINGLESTEP... */
        clear_tsk_thread_flag(child, TIF_SINGLESTEP);

        /* But touch TF only if it was set by us.. */
        if (child->ptrace & PT_DTRACE) {
                struct pt_regs *regs = task_pt_regs(child);
                regs->eflags &= ~X86_EFLAGS_TF;
                child->ptrace &= ~PT_DTRACE;
        }
}

/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure the single step bit is not set.
 */
void ptrace_disable(struct task_struct *child)
{
        clear_singlestep(child);
}

static int putreg(struct task_struct *child,
                  unsigned long regno, unsigned long value)
{
        unsigned long tmp;

        switch (regno) {
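        /*
         * Segment selectors written by the tracer must have RPL 3 (user
         * privilege); %fs/%gs/%ds/%es may also be set to the null selector.
         */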
        case offsetof(struct user_regs_struct, fs):
                if (value && (value & 3) != 3)
                        return -EIO;
                child->thread.fsindex = value & 0xffff;
                return 0;
        case offsetof(struct user_regs_struct, gs):
                if (value && (value & 3) != 3)
                        return -EIO;
                child->thread.gsindex = value & 0xffff;
                return 0;
        case offsetof(struct user_regs_struct, ds):
                if (value && (value & 3) != 3)
                        return -EIO;
                child->thread.ds = value & 0xffff;
                return 0;
        case offsetof(struct user_regs_struct, es):
                if (value && (value & 3) != 3)
                        return -EIO;
                child->thread.es = value & 0xffff;
                return 0;
        case offsetof(struct user_regs_struct, ss):
                if ((value & 3) != 3)
                        return -EIO;
                value &= 0xffff;
                return 0;
        case offsetof(struct user_regs_struct, fs_base):
                if (value >= TASK_SIZE_OF(child))
                        return -EIO;
                /*
                 * When changing the segment base, use do_arch_prctl
                 * to set either thread.fs or thread.fsindex and the
                 * corresponding GDT slot.
                 */
                if (child->thread.fs != value)
                        return do_arch_prctl(child, ARCH_SET_FS, value);
                return 0;
        case offsetof(struct user_regs_struct, gs_base):
                /*
                 * Exactly the same here as the %fs handling above.
                 */
                if (value >= TASK_SIZE_OF(child))
                        return -EIO;
                if (child->thread.gs != value)
                        return do_arch_prctl(child, ARCH_SET_GS, value);
                return 0;
        case offsetof(struct user_regs_struct, eflags):
                value &= FLAG_MASK;
                tmp = get_stack_long(child, EFL_OFFSET);
                tmp &= ~FLAG_MASK;
                value |= tmp;
                break;
        case offsetof(struct user_regs_struct, cs):
                if ((value & 3) != 3)
                        return -EIO;
                value &= 0xffff;
                break;
        }
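        /*
         * Everything else lives in the pt_regs frame saved on the kernel
         * stack; regno is the byte offset within pt_regs, converted here to
         * a (negative) offset from thread.rsp0.
         */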
        put_stack_long(child, regno - sizeof(struct pt_regs), value);
        return 0;
}

static unsigned long getreg(struct task_struct *child, unsigned long regno)
{
        unsigned long val;

        switch (regno) {
        case offsetof(struct user_regs_struct, fs):
                return child->thread.fsindex;
        case offsetof(struct user_regs_struct, gs):
                return child->thread.gsindex;
        case offsetof(struct user_regs_struct, ds):
                return child->thread.ds;
        case offsetof(struct user_regs_struct, es):
                return child->thread.es;
        case offsetof(struct user_regs_struct, fs_base):
                /*
                 * do_arch_prctl may have used a GDT slot instead of
                 * the MSR. To userland, it appears the same either
                 * way, except the %fs segment selector might not be 0.
                 */
                if (child->thread.fs != 0)
                        return child->thread.fs;
                if (child->thread.fsindex != FS_TLS_SEL)
                        return 0;
                return get_desc_base(&child->thread.tls_array[FS_TLS]);
        case offsetof(struct user_regs_struct, gs_base):
                /*
                 * Exactly the same here as the %fs handling above.
                 */
                if (child->thread.gs != 0)
                        return child->thread.gs;
                if (child->thread.gsindex != GS_TLS_SEL)
                        return 0;
                return get_desc_base(&child->thread.tls_array[GS_TLS]);
        default:
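                /*
                 * Anything else is read straight out of the saved pt_regs
                 * frame; 32-bit tasks only see the low 32 bits.
                 */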
                regno = regno - sizeof(struct pt_regs);
                val = get_stack_long(child, regno);
                if (test_tsk_thread_flag(child, TIF_IA32))
                        val &= 0xffffffff;
                return val;
        }
}

long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
        long i, ret;
        unsigned ui;

        switch (request) {
        /* when I and D space are separate, these will need to be fixed. */
        case PTRACE_PEEKTEXT: /* read word at location addr. */
        case PTRACE_PEEKDATA:
                ret = generic_ptrace_peekdata(child, addr, data);
                break;

        /* read the word at location addr in the USER area. */
        case PTRACE_PEEKUSR: {
                unsigned long tmp;

                ret = -EIO;
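                /* The USER-area offset must be 8-byte aligned and in range. */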
                if ((addr & 7) ||
                    addr > sizeof(struct user) - 7)
                        break;

                switch (addr) {
                case 0 ... sizeof(struct user_regs_struct) - sizeof(long):
                        tmp = getreg(child, addr);
                        break;
                case offsetof(struct user, u_debugreg[0]):
                        tmp = child->thread.debugreg0;
                        break;
                case offsetof(struct user, u_debugreg[1]):
                        tmp = child->thread.debugreg1;
                        break;
                case offsetof(struct user, u_debugreg[2]):
                        tmp = child->thread.debugreg2;
                        break;
                case offsetof(struct user, u_debugreg[3]):
                        tmp = child->thread.debugreg3;
                        break;
                case offsetof(struct user, u_debugreg[6]):
                        tmp = child->thread.debugreg6;
                        break;
                case offsetof(struct user, u_debugreg[7]):
                        tmp = child->thread.debugreg7;
                        break;
                default:
                        tmp = 0;
                        break;
                }
                ret = put_user(tmp, (unsigned long __user *) data);
                break;
        }

        /* when I and D space are separate, this will have to be fixed. */
        case PTRACE_POKETEXT: /* write the word at location addr. */
        case PTRACE_POKEDATA:
                ret = generic_ptrace_pokedata(child, addr, data);
                break;

        case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
        {
                int dsize = test_tsk_thread_flag(child, TIF_IA32) ? 3 : 7;
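                /*
                 * dsize is the widest possible watchpoint span minus one
                 * (4 bytes for 32-bit tasks, 8 for 64-bit); the checks below
                 * use it to keep the whole watched range below TASK_SIZE.
                 */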
                ret = -EIO;
                if ((addr & 7) ||
                    addr > sizeof(struct user) - 7)
                        break;

                switch (addr) {
                case 0 ... sizeof(struct user_regs_struct) - sizeof(long):
                        ret = putreg(child, addr, data);
                        break;
                /* Disallow setting a breakpoint in the vsyscall area. */
                case offsetof(struct user, u_debugreg[0]):
                        if (data >= TASK_SIZE_OF(child) - dsize)
                                break;
                        child->thread.debugreg0 = data;
                        ret = 0;
                        break;
                case offsetof(struct user, u_debugreg[1]):
                        if (data >= TASK_SIZE_OF(child) - dsize)
                                break;
                        child->thread.debugreg1 = data;
                        ret = 0;
                        break;
                case offsetof(struct user, u_debugreg[2]):
                        if (data >= TASK_SIZE_OF(child) - dsize)
                                break;
                        child->thread.debugreg2 = data;
                        ret = 0;
                        break;
                case offsetof(struct user, u_debugreg[3]):
                        if (data >= TASK_SIZE_OF(child) - dsize)
                                break;
                        child->thread.debugreg3 = data;
                        ret = 0;
                        break;
                case offsetof(struct user, u_debugreg[6]):
                        if (data >> 32)
                                break;
                        child->thread.debugreg6 = data;
                        ret = 0;
                        break;
                case offsetof(struct user, u_debugreg[7]):
                        /*
                         * See arch/i386/kernel/ptrace.c for an explanation
                         * of this awkward check.
                         */
                        data &= ~DR_CONTROL_RESERVED;
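                        /*
                         * Check each of DR7's four len/rw nibbles against a
                         * bitmap (0x5554) of disallowed combinations: I/O
                         * breakpoints and instruction breakpoints with a
                         * nonzero length.
                         */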
                        for (i = 0; i < 4; i++)
                                if ((0x5554 >> ((data >> (16 + 4*i)) & 0xf)) & 1)
                                        break;
                        if (i == 4) {
                                child->thread.debugreg7 = data;
                                if (data)
                                        set_tsk_thread_flag(child, TIF_DEBUG);
                                else
                                        clear_tsk_thread_flag(child, TIF_DEBUG);
                                ret = 0;
                        }
                        break;
                }
                break;
        }

        case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
        case PTRACE_CONT: /* restart after signal. */
                ret = -EIO;
                if (!valid_signal(data))
                        break;
                if (request == PTRACE_SYSCALL)
                        set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
                else
                        clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
                clear_tsk_thread_flag(child, TIF_SINGLESTEP);
                child->exit_code = data;
                /* make sure the single step bit is not set. */
                clear_singlestep(child);
                wake_up_process(child);
                ret = 0;
                break;

#ifdef CONFIG_IA32_EMULATION
        /*
         * This only makes sense for 32-bit programs, but allow a 64-bit
         * debugger to fully examine them too. Better not to use it against
         * 64-bit processes; use PTRACE_ARCH_PRCTL instead.
         */
        case PTRACE_GET_THREAD_AREA:
                if (addr < 0)
                        return -EIO;
                ret = do_get_thread_area(child, addr,
                                         (struct user_desc __user *) data);
                break;
        case PTRACE_SET_THREAD_AREA:
                if (addr < 0)
                        return -EIO;
                ret = do_set_thread_area(child, addr,
                                         (struct user_desc __user *) data, 0);
                break;
#endif

        /*
         * normal 64bit interface to access TLS data.
         * Works just like arch_prctl, except that the arguments
         * are reversed.
         */
        case PTRACE_ARCH_PRCTL:
                ret = do_arch_prctl(child, data, addr);
                break;

        /*
         * make the child exit. Best I can do is send it a sigkill.
         * perhaps it should be put in the status that it wants to
         * exit.
         */
        case PTRACE_KILL:
                ret = 0;
                if (child->exit_state == EXIT_ZOMBIE) /* already dead */
                        break;
                clear_tsk_thread_flag(child, TIF_SINGLESTEP);
                child->exit_code = SIGKILL;
                /* make sure the single step bit is not set. */
                clear_singlestep(child);
                wake_up_process(child);
                break;

        case PTRACE_SINGLESTEP: /* set the trap flag. */
                ret = -EIO;
                if (!valid_signal(data))
                        break;
                clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
                set_singlestep(child);
                child->exit_code = data;
                /* give it a chance to run. */
                wake_up_process(child);
                ret = 0;
                break;

        case PTRACE_GETREGS: { /* Get all gp regs from the child. */
                if (!access_ok(VERIFY_WRITE, (unsigned __user *)data,
                               sizeof(struct user_regs_struct))) {
                        ret = -EIO;
                        break;
                }
                ret = 0;
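                /*
                 * Copy the register image out one word at a time, translating
                 * each offset through getreg() so the segment and base
                 * special cases are honoured.
                 */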
                for (ui = 0; ui < sizeof(struct user_regs_struct); ui += sizeof(long)) {
                        ret |= __put_user(getreg(child, ui), (unsigned long __user *) data);
                        data += sizeof(long);
                }
                break;
        }

        case PTRACE_SETREGS: { /* Set all gp regs in the child. */
                unsigned long tmp;

                if (!access_ok(VERIFY_READ, (unsigned __user *)data,
                               sizeof(struct user_regs_struct))) {
                        ret = -EIO;
                        break;
                }
                ret = 0;
                for (ui = 0; ui < sizeof(struct user_regs_struct); ui += sizeof(long)) {
                        ret = __get_user(tmp, (unsigned long __user *) data);
                        if (ret)
                                break;
                        ret = putreg(child, ui, tmp);
                        if (ret)
                                break;
                        data += sizeof(long);
                }
                break;
        }

        case PTRACE_GETFPREGS: { /* Get the child extended FPU state. */
                if (!access_ok(VERIFY_WRITE, (unsigned __user *)data,
                               sizeof(struct user_i387_struct))) {
                        ret = -EIO;
                        break;
                }
                ret = get_fpregs((struct user_i387_struct __user *)data, child);
                break;
        }

        case PTRACE_SETFPREGS: { /* Set the child extended FPU state. */
                if (!access_ok(VERIFY_READ, (unsigned __user *)data,
                               sizeof(struct user_i387_struct))) {
                        ret = -EIO;
                        break;
                }
                set_stopped_child_used_math(child);
                ret = set_fpregs(child, (struct user_i387_struct __user *)data);
                break;
        }

        default:
                ret = ptrace_request(child, request, addr, data);
                break;
        }
        return ret;
}

static void syscall_trace(struct pt_regs *regs)
{
#if 0
        printk("trace %s rip %lx rsp %lx rax %d origrax %d caller %lx tiflags %x ptrace %x\n",
               current->comm,
               regs->rip, regs->rsp, regs->rax, regs->orig_rax, __builtin_return_address(0),
               current_thread_info()->flags, current->ptrace);
#endif

        ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
                                 ? 0x80 : 0));
        /*
         * this isn't the same as continuing with a signal, but it will do
         * for normal use. strace only continues with a signal if the
         * stopping signal is not SIGTRAP. -brl
         */
        if (current->exit_code) {
                send_sig(current->exit_code, current, 1);
                current->exit_code = 0;
        }
}

asmlinkage void syscall_trace_enter(struct pt_regs *regs)
{
        /* do the secure computing check first */
        secure_computing(regs->orig_rax);

        if (test_thread_flag(TIF_SYSCALL_TRACE)
            && (current->ptrace & PT_PTRACED))
                syscall_trace(regs);

        if (unlikely(current->audit_context)) {
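                /*
                 * Report the first four syscall arguments: 32-bit tasks pass
                 * them in ebx/ecx/edx/esi, 64-bit tasks in rdi/rsi/rdx/r10.
                 */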
                if (test_thread_flag(TIF_IA32)) {
                        audit_syscall_entry(AUDIT_ARCH_I386,
                                            regs->orig_rax,
                                            regs->rbx, regs->rcx,
                                            regs->rdx, regs->rsi);
                } else {
                        audit_syscall_entry(AUDIT_ARCH_X86_64,
                                            regs->orig_rax,
                                            regs->rdi, regs->rsi,
                                            regs->rdx, regs->r10);
                }
        }
}

asmlinkage void syscall_trace_leave(struct pt_regs *regs)
{
        if (unlikely(current->audit_context))
                audit_syscall_exit(AUDITSC_RESULT(regs->rax), regs->rax);

        if ((test_thread_flag(TIF_SYSCALL_TRACE)
             || test_thread_flag(TIF_SINGLESTEP))
            && (current->ptrace & PT_PTRACED))
                syscall_trace(regs);
}