  1. /* ptrace.c */
  2. /* By Ross Biro 1/23/92 */
  3. /*
  4. * Pentium III FXSR, SSE support
  5. * Gareth Hughes <gareth@valinux.com>, May 2000
  6. *
  7. * x86-64 port 2000-2002 Andi Kleen
  8. */
  9. #include <linux/kernel.h>
  10. #include <linux/sched.h>
  11. #include <linux/mm.h>
  12. #include <linux/smp.h>
  13. #include <linux/smp_lock.h>
  14. #include <linux/errno.h>
  15. #include <linux/ptrace.h>
  16. #include <linux/user.h>
  17. #include <linux/security.h>
  18. #include <linux/audit.h>
  19. #include <linux/seccomp.h>
  20. #include <linux/signal.h>
  21. #include <asm/uaccess.h>
  22. #include <asm/pgtable.h>
  23. #include <asm/system.h>
  24. #include <asm/processor.h>
  25. #include <asm/i387.h>
  26. #include <asm/debugreg.h>
  27. #include <asm/ldt.h>
  28. #include <asm/desc.h>
  29. #include <asm/proto.h>
  30. #include <asm/ia32.h>
  31. /*
  32. * does not yet catch signals sent when the child dies.
  33. * in exit.c or in signal.c.
  34. */
  35. /* determines which flags the user has access to. */
  36. /* 1 = access 0 = no access */
  37. #define FLAG_MASK 0x44dd5UL
/* sets the trap flag. */
  39. #define TRAP_FLAG 0x100UL
  40. /*
  41. * eflags and offset of eflags on child stack..
  42. */
  43. #define EFLAGS offsetof(struct pt_regs, eflags)
  44. #define EFL_OFFSET ((int)(EFLAGS-sizeof(struct pt_regs)))
  45. /*
  46. * this routine will get a word off of the processes privileged stack.
  47. * the offset is how far from the base addr as stored in the TSS.
  48. * this routine assumes that all the privileged stacks are in our
  49. * data space.
  50. */
  51. static inline unsigned long get_stack_long(struct task_struct *task, int offset)
  52. {
  53. unsigned char *stack;
  54. stack = (unsigned char *)task->thread.rsp0;
  55. stack += offset;
  56. return (*((unsigned long *)stack));
  57. }
  58. static inline struct pt_regs *get_child_regs(struct task_struct *task)
  59. {
  60. struct pt_regs *regs = (void *)task->thread.rsp0;
  61. return regs - 1;
  62. }
  63. /*
  64. * this routine will put a word on the processes privileged stack.
  65. * the offset is how far from the base addr as stored in the TSS.
  66. * this routine assumes that all the privileged stacks are in our
  67. * data space.
  68. */
  69. static inline long put_stack_long(struct task_struct *task, int offset,
  70. unsigned long data)
  71. {
  72. unsigned char * stack;
  73. stack = (unsigned char *) task->thread.rsp0;
  74. stack += offset;
  75. *(unsigned long *) stack = data;
  76. return 0;
  77. }
  78. #define LDT_SEGMENT 4
/*
 * Translate the child's user-visible %rip into a linear address.
 *
 * For GDT-based (flat) code segments this is just regs->rip.  When %cs
 * selects an LDT entry, the segment base taken from the LDT descriptor
 * is added, and for a 16-bit code segment the offset is first truncated
 * to 16 bits.
 */
unsigned long convert_rip_to_linear(struct task_struct *child, struct pt_regs *regs)
{
	unsigned long addr, seg;

	addr = regs->rip;
	seg = regs->cs & 0xffff;

	/*
	 * We'll assume that the code segments in the GDT
	 * are all zero-based. That is largely true: the
	 * TLS segments are used for data, and the PNPBIOS
	 * and APM bios ones we just ignore here.
	 */
	if (seg & LDT_SEGMENT) {
		u32 *desc;
		unsigned long base;

		/* Serialize against concurrent LDT changes in the child's mm. */
		down(&child->mm->context.sem);
		/* (seg & ~7) is the byte offset of the descriptor within the
		   LDT.  NOTE(review): assumes context.ldt has byte-sized (or
		   void *) element type so this arithmetic is not scaled —
		   confirm against the mm_context definition. */
		desc = child->mm->context.ldt + (seg & ~7);
		/* Reassemble the 32-bit segment base scattered across the
		   two descriptor words. */
		base = (desc[0] >> 16) | ((desc[1] & 0xff) << 16) | (desc[1] & 0xff000000);

		/* 16-bit code segment? (D/B bit, bit 22 of the high word, clear) */
		if (!((desc[1] >> 22) & 1))
			addr &= 0xffff;
		addr += base;
		up(&child->mm->context.sem);
	}
	return addr;
}
/*
 * Return 1 if the instruction the child is stopped at is a "popf"
 * (possibly behind legal prefixes), 0 otherwise or if the instruction
 * bytes cannot be read.  Used by set_singlestep() to avoid claiming
 * ownership of a TF bit that the traced instruction itself will rewrite.
 */
static int is_at_popf(struct task_struct *child, struct pt_regs *regs)
{
	int i, copied;
	unsigned char opcode[16];
	unsigned long addr = convert_rip_to_linear(child, regs);

	/* Fetch up to 16 instruction bytes from the child (read, no write). */
	copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
	for (i = 0; i < copied; i++) {
		switch (opcode[i]) {
		/* popf */
		case 0x9d:
			return 1;

			/* CHECKME: 64 65 */

		/* opcode and address size prefixes */
		case 0x66: case 0x67:
			continue;
		/* irrelevant prefixes (segment overrides and repeats) */
		case 0x26: case 0x2e:
		case 0x36: case 0x3e:
		case 0x64: case 0x65:
		case 0xf0: case 0xf2: case 0xf3:
			continue;

		/* REX prefixes */
		case 0x40 ... 0x4f:
			continue;

			/* CHECKME: f0, f2, f3 */

		/*
		 * pushf: NOTE! We should probably not let
		 * the user see the TF bit being set. But
		 * it's more pain than it's worth to avoid
		 * it, and a debugger could emulate this
		 * all in user space if it _really_ cares.
		 */
		case 0x9c:
		default:
			/* Any non-prefix opcode (including pushf) ends the scan. */
			return 0;
		}
	}
	return 0;
}
/*
 * Arrange for the child to trap after executing one instruction:
 * set TIF_SINGLESTEP and, if needed, the TF bit in the saved eflags.
 * PT_DTRACE records whether it was us who set TF, so that
 * clear_singlestep() only clears a TF bit it owns.
 */
static void set_singlestep(struct task_struct *child)
{
	struct pt_regs *regs = get_child_regs(child);

	/*
	 * Always set TIF_SINGLESTEP - this guarantees that
	 * we single-step system calls etc.. This will also
	 * cause us to set TF when returning to user mode.
	 */
	set_tsk_thread_flag(child, TIF_SINGLESTEP);

	/*
	 * If TF was already set, don't do anything else
	 */
	if (regs->eflags & TRAP_FLAG)
		return;

	/* Set TF on the kernel stack.. */
	regs->eflags |= TRAP_FLAG;

	/*
	 * ..but if TF is changed by the instruction we will trace,
	 * don't mark it as being "us" that set it, so that we
	 * won't clear it by hand later.
	 *
	 * AK: this is not enough, LAHF and IRET can change TF in user space too.
	 */
	if (is_at_popf(child, regs))
		return;

	/* TF is ours: remember it so clear_singlestep() undoes it. */
	child->ptrace |= PT_DTRACE;
}
  170. static void clear_singlestep(struct task_struct *child)
  171. {
  172. /* Always clear TIF_SINGLESTEP... */
  173. clear_tsk_thread_flag(child, TIF_SINGLESTEP);
  174. /* But touch TF only if it was set by us.. */
  175. if (child->ptrace & PT_DTRACE) {
  176. struct pt_regs *regs = get_child_regs(child);
  177. regs->eflags &= ~TRAP_FLAG;
  178. child->ptrace &= ~PT_DTRACE;
  179. }
  180. }
/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure the single step bit is not set.
 * (Clears TIF_SINGLESTEP and, if we set it, the TF bit / PT_DTRACE.)
 */
void ptrace_disable(struct task_struct *child)
{
	clear_singlestep(child);
}
  190. static int putreg(struct task_struct *child,
  191. unsigned long regno, unsigned long value)
  192. {
  193. unsigned long tmp;
  194. /* Some code in the 64bit emulation may not be 64bit clean.
  195. Don't take any chances. */
  196. if (test_tsk_thread_flag(child, TIF_IA32))
  197. value &= 0xffffffff;
  198. switch (regno) {
  199. case offsetof(struct user_regs_struct,fs):
  200. if (value && (value & 3) != 3)
  201. return -EIO;
  202. child->thread.fsindex = value & 0xffff;
  203. return 0;
  204. case offsetof(struct user_regs_struct,gs):
  205. if (value && (value & 3) != 3)
  206. return -EIO;
  207. child->thread.gsindex = value & 0xffff;
  208. return 0;
  209. case offsetof(struct user_regs_struct,ds):
  210. if (value && (value & 3) != 3)
  211. return -EIO;
  212. child->thread.ds = value & 0xffff;
  213. return 0;
  214. case offsetof(struct user_regs_struct,es):
  215. if (value && (value & 3) != 3)
  216. return -EIO;
  217. child->thread.es = value & 0xffff;
  218. return 0;
  219. case offsetof(struct user_regs_struct,ss):
  220. if ((value & 3) != 3)
  221. return -EIO;
  222. value &= 0xffff;
  223. return 0;
  224. case offsetof(struct user_regs_struct,fs_base):
  225. if (value >= TASK_SIZE_OF(child))
  226. return -EIO;
  227. child->thread.fs = value;
  228. return 0;
  229. case offsetof(struct user_regs_struct,gs_base):
  230. if (value >= TASK_SIZE_OF(child))
  231. return -EIO;
  232. child->thread.gs = value;
  233. return 0;
  234. case offsetof(struct user_regs_struct, eflags):
  235. value &= FLAG_MASK;
  236. tmp = get_stack_long(child, EFL_OFFSET);
  237. tmp &= ~FLAG_MASK;
  238. value |= tmp;
  239. break;
  240. case offsetof(struct user_regs_struct,cs):
  241. if ((value & 3) != 3)
  242. return -EIO;
  243. value &= 0xffff;
  244. break;
  245. case offsetof(struct user_regs_struct, rip):
  246. /* Check if the new RIP address is canonical */
  247. if (value >= TASK_SIZE_OF(child))
  248. return -EIO;
  249. break;
  250. }
  251. put_stack_long(child, regno - sizeof(struct pt_regs), value);
  252. return 0;
  253. }
  254. static unsigned long getreg(struct task_struct *child, unsigned long regno)
  255. {
  256. unsigned long val;
  257. switch (regno) {
  258. case offsetof(struct user_regs_struct, fs):
  259. return child->thread.fsindex;
  260. case offsetof(struct user_regs_struct, gs):
  261. return child->thread.gsindex;
  262. case offsetof(struct user_regs_struct, ds):
  263. return child->thread.ds;
  264. case offsetof(struct user_regs_struct, es):
  265. return child->thread.es;
  266. case offsetof(struct user_regs_struct, fs_base):
  267. return child->thread.fs;
  268. case offsetof(struct user_regs_struct, gs_base):
  269. return child->thread.gs;
  270. default:
  271. regno = regno - sizeof(struct pt_regs);
  272. val = get_stack_long(child, regno);
  273. if (test_tsk_thread_flag(child, TIF_IA32))
  274. val &= 0xffffffff;
  275. return val;
  276. }
  277. }
/*
 * Arch-specific ptrace entry point: dispatches every PTRACE_* request
 * against the target task identified by @pid.  @addr and @data are
 * interpreted per-request (offset/pointer/signal number).  Returns 0 or
 * a positive value on success, a negative errno on failure.
 */
asmlinkage long sys_ptrace(long request, long pid, unsigned long addr, long data)
{
	struct task_struct *child;
	long i, ret;
	unsigned ui;

	/* This lock_kernel fixes a subtle race with suid exec */
	lock_kernel();
	ret = -EPERM;
	if (request == PTRACE_TRACEME) {
		/* are we already being traced? */
		if (current->ptrace & PT_PTRACED)
			goto out;
		ret = security_ptrace(current->parent, current);
		if (ret)
			goto out;
		/* set the ptrace bit in the process flags. */
		current->ptrace |= PT_PTRACED;
		ret = 0;
		goto out;
	}
	ret = -ESRCH;
	/* Look up the target and take a reference so it cannot vanish
	   while we operate on it. */
	read_lock(&tasklist_lock);
	child = find_task_by_pid(pid);
	if (child)
		get_task_struct(child);
	read_unlock(&tasklist_lock);
	if (!child)
		goto out;
	ret = -EPERM;
	if (pid == 1)		/* you may not mess with init */
		goto out_tsk;
	if (request == PTRACE_ATTACH) {
		ret = ptrace_attach(child);
		goto out_tsk;
	}
	/* All remaining requests need an attached, stopped child
	   (PTRACE_KILL is exempt from the "stopped" part). */
	ret = ptrace_check_attach(child, request == PTRACE_KILL);
	if (ret < 0)
		goto out_tsk;
	switch (request) {
	/* when I and D space are separate, these will need to be fixed. */
	case PTRACE_PEEKTEXT: /* read word at location addr. */
	case PTRACE_PEEKDATA: {
		unsigned long tmp;
		int copied;
		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
		ret = -EIO;
		if (copied != sizeof(tmp))
			break;
		ret = put_user(tmp,(unsigned long __user *) data);
		break;
	}
	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long tmp;
		ret = -EIO;
		/* Offset must be 8-byte aligned and inside struct user. */
		if ((addr & 7) ||
		    addr > sizeof(struct user) - 7)
			break;
		switch (addr) {
		/* GP/segment registers live at the front of struct user. */
		case 0 ... sizeof(struct user_regs_struct) - sizeof(long):
			tmp = getreg(child, addr);
			break;
		case offsetof(struct user, u_debugreg[0]):
			tmp = child->thread.debugreg0;
			break;
		case offsetof(struct user, u_debugreg[1]):
			tmp = child->thread.debugreg1;
			break;
		case offsetof(struct user, u_debugreg[2]):
			tmp = child->thread.debugreg2;
			break;
		case offsetof(struct user, u_debugreg[3]):
			tmp = child->thread.debugreg3;
			break;
		case offsetof(struct user, u_debugreg[6]):
			tmp = child->thread.debugreg6;
			break;
		case offsetof(struct user, u_debugreg[7]):
			tmp = child->thread.debugreg7;
			break;
		default:
			/* Anything else in struct user reads as zero. */
			tmp = 0;
			break;
		}
		ret = put_user(tmp,(unsigned long __user *) data);
		break;
	}
	/* when I and D space are separate, this will have to be fixed. */
	case PTRACE_POKETEXT: /* write the word at location addr. */
	case PTRACE_POKEDATA:
		ret = 0;
		if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
			break;
		ret = -EIO;
		break;
	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
	{
		/* Max breakpoint length: 4 bytes for 32-bit, 8 for 64-bit
		   tasks — used to keep watchpoints out of the vsyscall page. */
		int dsize = test_tsk_thread_flag(child, TIF_IA32) ? 3 : 7;
		ret = -EIO;
		if ((addr & 7) ||
		    addr > sizeof(struct user) - 7)
			break;
		switch (addr) {
		case 0 ... sizeof(struct user_regs_struct) - sizeof(long):
			ret = putreg(child, addr, data);
			break;
		/* Disallows to set a breakpoint into the vsyscall */
		case offsetof(struct user, u_debugreg[0]):
			if (data >= TASK_SIZE_OF(child) - dsize) break;
			child->thread.debugreg0 = data;
			ret = 0;
			break;
		case offsetof(struct user, u_debugreg[1]):
			if (data >= TASK_SIZE_OF(child) - dsize) break;
			child->thread.debugreg1 = data;
			ret = 0;
			break;
		case offsetof(struct user, u_debugreg[2]):
			if (data >= TASK_SIZE_OF(child) - dsize) break;
			child->thread.debugreg2 = data;
			ret = 0;
			break;
		case offsetof(struct user, u_debugreg[3]):
			if (data >= TASK_SIZE_OF(child) - dsize) break;
			child->thread.debugreg3 = data;
			ret = 0;
			break;
		case offsetof(struct user, u_debugreg[6]):
			/* DR6 only has meaningful bits in the low word. */
			if (data >> 32)
				break;
			child->thread.debugreg6 = data;
			ret = 0;
			break;
		case offsetof(struct user, u_debugreg[7]):
			/* See arch/i386/kernel/ptrace.c for an explanation of
			 * this awkward check.*/
			data &= ~DR_CONTROL_RESERVED;
			for(i=0; i<4; i++)
				if ((0x5454 >> ((data >> (16 + 4*i)) & 0xf)) & 1)
					break;
			/* Only accept DR7 if all four R/W+LEN fields passed. */
			if (i == 4) {
				child->thread.debugreg7 = data;
				ret = 0;
			}
			break;
		}
		break;
	}
	case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
	case PTRACE_CONT: /* restart after signal. */
		ret = -EIO;
		if (!valid_signal(data))
			break;
		if (request == PTRACE_SYSCALL)
			set_tsk_thread_flag(child,TIF_SYSCALL_TRACE);
		else
			clear_tsk_thread_flag(child,TIF_SYSCALL_TRACE);
		clear_tsk_thread_flag(child, TIF_SINGLESTEP);
		/* exit_code carries the signal to (re)deliver on resume. */
		child->exit_code = data;
		/* make sure the single step bit is not set. */
		clear_singlestep(child);
		wake_up_process(child);
		ret = 0;
		break;
#ifdef CONFIG_IA32_EMULATION
		/* This makes only sense with 32bit programs. Allow a
		   64bit debugger to fully examine them too. Better
		   don't use it against 64bit processes, use
		   PTRACE_ARCH_PRCTL instead. */
	case PTRACE_SET_THREAD_AREA: {
		struct user_desc __user *p;
		int old;
		p = (struct user_desc __user *)data;
		/* Temporarily substitute the caller-supplied entry number,
		   restoring the user's original value afterwards.
		   NOTE(review): get_user/put_user return values are ignored
		   here — a fault on @data goes unreported. */
		get_user(old,  &p->entry_number);
		put_user(addr, &p->entry_number);
		ret = do_set_thread_area(&child->thread, p);
		put_user(old,  &p->entry_number);
		break;
	/* Note: this label deliberately sits inside the SET block's
	   braces so it shares the p/old locals above. */
	case PTRACE_GET_THREAD_AREA:
		p = (struct user_desc __user *)data;
		get_user(old,  &p->entry_number);
		put_user(addr, &p->entry_number);
		ret = do_get_thread_area(&child->thread, p);
		put_user(old,  &p->entry_number);
		break;
	}
#endif
		/* normal 64bit interface to access TLS data.
		   Works just like arch_prctl, except that the arguments
		   are reversed. */
	case PTRACE_ARCH_PRCTL:
		ret = do_arch_prctl(child, data, addr);
		break;
/*
 * make the child exit.  Best I can do is send it a sigkill.
 * perhaps it should be put in the status that it wants to
 * exit.
 */
	case PTRACE_KILL:
		ret = 0;
		if (child->exit_state == EXIT_ZOMBIE)	/* already dead */
			break;
		clear_tsk_thread_flag(child, TIF_SINGLESTEP);
		child->exit_code = SIGKILL;
		/* make sure the single step bit is not set. */
		clear_singlestep(child);
		wake_up_process(child);
		break;
	case PTRACE_SINGLESTEP:    /* set the trap flag. */
		ret = -EIO;
		if (!valid_signal(data))
			break;
		clear_tsk_thread_flag(child,TIF_SYSCALL_TRACE);
		set_singlestep(child);
		child->exit_code = data;
		/* give it a chance to run. */
		wake_up_process(child);
		ret = 0;
		break;
	case PTRACE_DETACH:
		/* detach a process that was attached. */
		ret = ptrace_detach(child, data);
		break;
	case PTRACE_GETREGS: { /* Get all gp regs from the child. */
		if (!access_ok(VERIFY_WRITE, (unsigned __user *)data,
			       sizeof(struct user_regs_struct))) {
			ret = -EIO;
			break;
		}
		/* OR the __put_user results: any fault yields nonzero ret. */
		ret = 0;
		for (ui = 0; ui < sizeof(struct user_regs_struct); ui += sizeof(long)) {
			ret |= __put_user(getreg(child, ui),(unsigned long __user *) data);
			data += sizeof(long);
		}
		break;
	}
	case PTRACE_SETREGS: { /* Set all gp regs in the child. */
		unsigned long tmp;
		if (!access_ok(VERIFY_READ, (unsigned __user *)data,
			       sizeof(struct user_regs_struct))) {
			ret = -EIO;
			break;
		}
		ret = 0;
		for (ui = 0; ui < sizeof(struct user_regs_struct); ui += sizeof(long)) {
			ret |= __get_user(tmp, (unsigned long __user *) data);
			/* NOTE(review): putreg()'s -EIO is discarded here;
			   invalid individual registers are silently skipped. */
			putreg(child, ui, tmp);
			data += sizeof(long);
		}
		break;
	}
	case PTRACE_GETFPREGS: { /* Get the child extended FPU state. */
		if (!access_ok(VERIFY_WRITE, (unsigned __user *)data,
			       sizeof(struct user_i387_struct))) {
			ret = -EIO;
			break;
		}
		ret = get_fpregs((struct user_i387_struct __user *)data, child);
		break;
	}
	case PTRACE_SETFPREGS: { /* Set the child extended FPU state. */
		if (!access_ok(VERIFY_READ, (unsigned __user *)data,
			       sizeof(struct user_i387_struct))) {
			ret = -EIO;
			break;
		}
		/* Mark the FPU state as in use so it gets context-switched. */
		set_stopped_child_used_math(child);
		ret = set_fpregs(child, (struct user_i387_struct __user *)data);
		break;
	}
	default:
		/* Generic requests (PTRACE_GETEVENTMSG etc.) are handled by
		   the arch-independent code. */
		ret = ptrace_request(child, request, addr, data);
		break;
	}
out_tsk:
	put_task_struct(child);
out:
	unlock_kernel();
	return ret;
}
/*
 * Stop the current (traced) task and notify its tracer with SIGTRAP.
 * If PT_TRACESYSGOOD is set, bit 0x80 is ORed in so the debugger can
 * distinguish syscall stops from real SIGTRAPs.  Any signal the tracer
 * asked to deliver (left in exit_code) is sent before returning.
 */
static void syscall_trace(struct pt_regs *regs)
{

#if 0
	printk("trace %s rip %lx rsp %lx rax %d origrax %d caller %lx tiflags %x ptrace %x\n",
	       current->comm,
	       regs->rip, regs->rsp, regs->rax, regs->orig_rax, __builtin_return_address(0),
	       current_thread_info()->flags, current->ptrace);
#endif

	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
				? 0x80 : 0));
	/*
	 * this isn't the same as continuing with a signal, but it will do
	 * for normal use.  strace only continues with a signal if the
	 * stopping signal is not SIGTRAP.  -brl
	 */
	if (current->exit_code) {
		send_sig(current->exit_code, current, 1);
		current->exit_code = 0;
	}
}
/*
 * Slow-path hook run on syscall entry when TIF_SYSCALL_TRACE (or audit/
 * seccomp) is active: applies seccomp, stops for the tracer, and logs
 * the syscall arguments for auditing.
 */
asmlinkage void syscall_trace_enter(struct pt_regs *regs)
{
	/* do the secure computing check first */
	secure_computing(regs->orig_rax);

	if (test_thread_flag(TIF_SYSCALL_TRACE)
	    && (current->ptrace & PT_PTRACED))
		syscall_trace(regs);

	if (unlikely(current->audit_context)) {
		/* 32-bit and 64-bit syscalls pass arguments in different
		   registers, so record them per-ABI. */
		if (test_thread_flag(TIF_IA32)) {
			audit_syscall_entry(current, AUDIT_ARCH_I386,
					    regs->orig_rax,
					    regs->rbx, regs->rcx,
					    regs->rdx, regs->rsi);
		} else {
			audit_syscall_entry(current, AUDIT_ARCH_X86_64,
					    regs->orig_rax,
					    regs->rdi, regs->rsi,
					    regs->rdx, regs->r10);
		}
	}
}
  599. asmlinkage void syscall_trace_leave(struct pt_regs *regs)
  600. {
  601. if (unlikely(current->audit_context))
  602. audit_syscall_exit(current, AUDITSC_RESULT(regs->rax), regs->rax);
  603. if ((test_thread_flag(TIF_SYSCALL_TRACE)
  604. || test_thread_flag(TIF_SINGLESTEP))
  605. && (current->ptrace & PT_PTRACED))
  606. syscall_trace(regs);
  607. }