ptrace.c 18 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830
  1. /*
  2. * linux/arch/m32r/kernel/ptrace.c
  3. *
  4. * Copyright (C) 2002 Hirokazu Takata, Takeo Takahashi
  5. * Copyright (C) 2004 Hirokazu Takata, Kei Sakamoto
  6. *
  7. * Original x86 implementation:
  8. * By Ross Biro 1/23/92
  9. * edited by Linus Torvalds
  10. *
  11. * Some code taken from sh version:
  12. * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
  13. * Some code taken from arm version:
  14. * Copyright (C) 2000 Russell King
  15. */
  16. #include <linux/config.h>
  17. #include <linux/kernel.h>
  18. #include <linux/sched.h>
  19. #include <linux/mm.h>
  20. #include <linux/smp.h>
  21. #include <linux/smp_lock.h>
  22. #include <linux/errno.h>
  23. #include <linux/ptrace.h>
  24. #include <linux/user.h>
  25. #include <linux/string.h>
  26. #include <linux/signal.h>
  27. #include <asm/cacheflush.h>
  28. #include <asm/io.h>
  29. #include <asm/uaccess.h>
  30. #include <asm/pgtable.h>
  31. #include <asm/system.h>
  32. #include <asm/processor.h>
  33. #include <asm/mmu_context.h>
  34. /*
  35. * Get the address of the live pt_regs for the specified task.
  36. * These are saved onto the top kernel stack when the process
  37. * is not running.
  38. *
  39. * Note: if a user thread is execve'd from kernel space, the
  40. * kernel stack will not be empty on entry to the kernel, so
  41. * ptracing these tasks will fail.
  42. */
  43. static inline struct pt_regs *
  44. get_user_regs(struct task_struct *task)
  45. {
  46. return (struct pt_regs *)
  47. ((unsigned long)task->thread_info + THREAD_SIZE
  48. - sizeof(struct pt_regs));
  49. }
  50. /*
  51. * This routine will get a word off of the process kernel stack.
  52. */
  53. static inline unsigned long int
  54. get_stack_long(struct task_struct *task, int offset)
  55. {
  56. unsigned long *stack;
  57. stack = (unsigned long *)get_user_regs(task);
  58. return stack[offset];
  59. }
  60. /*
  61. * This routine will put a word on the process kernel stack.
  62. */
  63. static inline int
  64. put_stack_long(struct task_struct *task, int offset, unsigned long data)
  65. {
  66. unsigned long *stack;
  67. stack = (unsigned long *)get_user_regs(task);
  68. stack[offset] = data;
  69. return 0;
  70. }
/*
 * Map M32R general-register numbers (0..15) to their word offsets in
 * struct pt_regs, for use with get_stack_long()/put_stack_long().
 */
static int reg_offset[] = {
	PT_R0, PT_R1, PT_R2, PT_R3, PT_R4, PT_R5, PT_R6, PT_R7,
	PT_R8, PT_R9, PT_R10, PT_R11, PT_R12, PT_FP, PT_LR, PT_SPU,
};
  75. /*
  76. * Read the word at offset "off" into the "struct user". We
  77. * actually access the pt_regs stored on the kernel stack.
  78. */
  79. static int ptrace_read_user(struct task_struct *tsk, unsigned long off,
  80. unsigned long __user *data)
  81. {
  82. unsigned long tmp;
  83. #ifndef NO_FPU
  84. struct user * dummy = NULL;
  85. #endif
  86. if ((off & 3) || (off < 0) || (off > sizeof(struct user) - 3))
  87. return -EIO;
  88. off >>= 2;
  89. switch (off) {
  90. case PT_EVB:
  91. __asm__ __volatile__ (
  92. "mvfc %0, cr5 \n\t"
  93. : "=r" (tmp)
  94. );
  95. break;
  96. case PT_CBR: {
  97. unsigned long psw;
  98. psw = get_stack_long(tsk, PT_PSW);
  99. tmp = ((psw >> 8) & 1);
  100. }
  101. break;
  102. case PT_PSW: {
  103. unsigned long psw, bbpsw;
  104. psw = get_stack_long(tsk, PT_PSW);
  105. bbpsw = get_stack_long(tsk, PT_BBPSW);
  106. tmp = ((psw >> 8) & 0xff) | ((bbpsw & 0xff) << 8);
  107. }
  108. break;
  109. case PT_PC:
  110. tmp = get_stack_long(tsk, PT_BPC);
  111. break;
  112. case PT_BPC:
  113. off = PT_BBPC;
  114. /* fall through */
  115. default:
  116. if (off < (sizeof(struct pt_regs) >> 2))
  117. tmp = get_stack_long(tsk, off);
  118. #ifndef NO_FPU
  119. else if (off >= (long)(&dummy->fpu >> 2) &&
  120. off < (long)(&dummy->u_fpvalid >> 2)) {
  121. if (!tsk_used_math(tsk)) {
  122. if (off == (long)(&dummy->fpu.fpscr >> 2))
  123. tmp = FPSCR_INIT;
  124. else
  125. tmp = 0;
  126. } else
  127. tmp = ((long *)(&tsk->thread.fpu >> 2))
  128. [off - (long)&dummy->fpu];
  129. } else if (off == (long)(&dummy->u_fpvalid >> 2))
  130. tmp = !!tsk_used_math(tsk);
  131. #endif /* not NO_FPU */
  132. else
  133. tmp = 0;
  134. }
  135. return put_user(tmp, data);
  136. }
  137. static int ptrace_write_user(struct task_struct *tsk, unsigned long off,
  138. unsigned long data)
  139. {
  140. int ret = -EIO;
  141. #ifndef NO_FPU
  142. struct user * dummy = NULL;
  143. #endif
  144. if ((off & 3) || off < 0 ||
  145. off > sizeof(struct user) - 3)
  146. return -EIO;
  147. off >>= 2;
  148. switch (off) {
  149. case PT_EVB:
  150. case PT_BPC:
  151. case PT_SPI:
  152. /* We don't allow to modify evb. */
  153. ret = 0;
  154. break;
  155. case PT_PSW:
  156. case PT_CBR: {
  157. /* We allow to modify only cbr in psw */
  158. unsigned long psw;
  159. psw = get_stack_long(tsk, PT_PSW);
  160. psw = (psw & ~0x100) | ((data & 1) << 8);
  161. ret = put_stack_long(tsk, PT_PSW, psw);
  162. }
  163. break;
  164. case PT_PC:
  165. off = PT_BPC;
  166. data &= ~1;
  167. /* fall through */
  168. default:
  169. if (off < (sizeof(struct pt_regs) >> 2))
  170. ret = put_stack_long(tsk, off, data);
  171. #ifndef NO_FPU
  172. else if (off >= (long)(&dummy->fpu >> 2) &&
  173. off < (long)(&dummy->u_fpvalid >> 2)) {
  174. set_stopped_child_used_math(tsk);
  175. ((long *)&tsk->thread.fpu)
  176. [off - (long)&dummy->fpu] = data;
  177. ret = 0;
  178. } else if (off == (long)(&dummy->u_fpvalid >> 2)) {
  179. conditional_stopped_child_used_math(data, tsk);
  180. ret = 0;
  181. }
  182. #endif /* not NO_FPU */
  183. break;
  184. }
  185. return ret;
  186. }
  187. /*
  188. * Get all user integer registers.
  189. */
  190. static int ptrace_getregs(struct task_struct *tsk, void __user *uregs)
  191. {
  192. struct pt_regs *regs = get_user_regs(tsk);
  193. return copy_to_user(uregs, regs, sizeof(struct pt_regs)) ? -EFAULT : 0;
  194. }
  195. /*
  196. * Set all user integer registers.
  197. */
  198. static int ptrace_setregs(struct task_struct *tsk, void __user *uregs)
  199. {
  200. struct pt_regs newregs;
  201. int ret;
  202. ret = -EFAULT;
  203. if (copy_from_user(&newregs, uregs, sizeof(struct pt_regs)) == 0) {
  204. struct pt_regs *regs = get_user_regs(tsk);
  205. *regs = newregs;
  206. ret = 0;
  207. }
  208. return ret;
  209. }
  210. static inline int
  211. check_condition_bit(struct task_struct *child)
  212. {
  213. return (int)((get_stack_long(child, PT_PSW) >> 8) & 1);
  214. }
  215. static int
  216. check_condition_src(unsigned long op, unsigned long regno1,
  217. unsigned long regno2, struct task_struct *child)
  218. {
  219. unsigned long reg1, reg2;
  220. reg2 = get_stack_long(child, reg_offset[regno2]);
  221. switch (op) {
  222. case 0x0: /* BEQ */
  223. reg1 = get_stack_long(child, reg_offset[regno1]);
  224. return reg1 == reg2;
  225. case 0x1: /* BNE */
  226. reg1 = get_stack_long(child, reg_offset[regno1]);
  227. return reg1 != reg2;
  228. case 0x8: /* BEQZ */
  229. return reg2 == 0;
  230. case 0x9: /* BNEZ */
  231. return reg2 != 0;
  232. case 0xa: /* BLTZ */
  233. return (int)reg2 < 0;
  234. case 0xb: /* BGEZ */
  235. return (int)reg2 >= 0;
  236. case 0xc: /* BLEZ */
  237. return (int)reg2 <= 0;
  238. case 0xd: /* BGTZ */
  239. return (int)reg2 > 0;
  240. default:
  241. /* never reached */
  242. return 0;
  243. }
  244. }
/*
 * Compute the address of the instruction that will execute after the
 * 16-bit instruction at @pc, for single-step emulation.  The relevant
 * halfword is selected from the fetched word by pc & 3 (left or right
 * slot); branches/jumps that would be taken yield their target,
 * everything else falls through to pc + 2 (or pc + 4 when the word is
 * a parallel "||" pair).
 */
static void
compute_next_pc_for_16bit_insn(unsigned long insn, unsigned long pc,
			       unsigned long *next_pc,
			       struct task_struct *child)
{
	unsigned long op, op2, op3;
	unsigned long disp;
	unsigned long regno;
	int parallel = 0;

	/* Bit 15 of the fetched word marks a parallel instruction pair. */
	if (insn & 0x00008000)
		parallel = 1;
	if (pc & 3)
		insn &= 0x7fff;	/* right slot */
	else
		insn >>= 16;	/* left slot */

	op = (insn >> 12) & 0xf;
	op2 = (insn >> 8) & 0xf;
	op3 = (insn >> 4) & 0xf;

	if (op == 0x7) {
		/* 8-bit pc-relative branches: disp is sign-extended from
		 * 8 bits and scaled by 4 via the shift pair below. */
		switch (op2) {
		case 0xd:	/* BNC */
		case 0x9:	/* BNCL */
			if (!check_condition_bit(child)) {
				disp = (long)(insn << 24) >> 22;
				*next_pc = (pc & ~0x3) + disp;
				return;
			}
			break;
		case 0x8:	/* BCL */
		case 0xc:	/* BC */
			if (check_condition_bit(child)) {
				disp = (long)(insn << 24) >> 22;
				*next_pc = (pc & ~0x3) + disp;
				return;
			}
			break;
		case 0xe:	/* BL */
		case 0xf:	/* BRA */
			/* unconditional: always taken */
			disp = (long)(insn << 24) >> 22;
			*next_pc = (pc & ~0x3) + disp;
			return;
			break;
		}
	} else if (op == 0x1) {
		switch (op2) {
		case 0x0:
			if (op3 == 0xf) {	/* TRAP */
#if 1
				/* pass through: treated as sequential */
#else
				/* kernel space is not allowed as next_pc */
				unsigned long evb;
				unsigned long trapno;
				trapno = insn & 0xf;
				__asm__ __volatile__ (
					"mvfc %0, cr5\n"
					:"=r"(evb)
					:
				);
				*next_pc = evb + (trapno << 2);
				return;
#endif
			} else if (op3 == 0xd) {	/* RTE */
				/* return-from-exception resumes at BPC */
				*next_pc = get_stack_long(child, PT_BPC);
				return;
			}
			break;
		case 0xc:	/* JC */
			if (op3 == 0xc && check_condition_bit(child)) {
				regno = insn & 0xf;
				*next_pc = get_stack_long(child,
							  reg_offset[regno]);
				return;
			}
			break;
		case 0xd:	/* JNC */
			if (op3 == 0xc && !check_condition_bit(child)) {
				regno = insn & 0xf;
				*next_pc = get_stack_long(child,
							  reg_offset[regno]);
				return;
			}
			break;
		case 0xe:	/* JL */
		case 0xf:	/* JMP */
			if (op3 == 0xc) {	/* JMP */
				/* register-indirect jump */
				regno = insn & 0xf;
				*next_pc = get_stack_long(child,
							  reg_offset[regno]);
				return;
			}
			break;
		}
	}

	/* No branch taken: sequential execution. */
	if (parallel)
		*next_pc = pc + 4;
	else
		*next_pc = pc + 2;
}
/*
 * Compute the address of the instruction following the 32-bit
 * instruction at @pc, for single-step emulation.  Only two opcode
 * groups can redirect flow: 24-bit-displacement branches (op 0xf) and
 * 16-bit compare-and-branch forms (op 0xb).  Everything else falls
 * through to pc + 4.
 */
static void
compute_next_pc_for_32bit_insn(unsigned long insn, unsigned long pc,
			       unsigned long *next_pc,
			       struct task_struct *child)
{
	unsigned long op;
	unsigned long op2;
	unsigned long disp;
	unsigned long regno1, regno2;

	op = (insn >> 28) & 0xf;
	if (op == 0xf) {	/* branch 24-bit relative */
		op2 = (insn >> 24) & 0xf;
		switch (op2) {
		case 0xd:	/* BNC */
		case 0x9:	/* BNCL */
			if (!check_condition_bit(child)) {
				/* sign-extend 24-bit disp, scale by 4 */
				disp = (long)(insn << 8) >> 6;
				*next_pc = (pc & ~0x3) + disp;
				return;
			}
			break;
		case 0x8:	/* BCL */
		case 0xc:	/* BC */
			if (check_condition_bit(child)) {
				disp = (long)(insn << 8) >> 6;
				*next_pc = (pc & ~0x3) + disp;
				return;
			}
			break;
		case 0xe:	/* BL */
		case 0xf:	/* BRA */
			/* unconditional: always taken */
			disp = (long)(insn << 8) >> 6;
			*next_pc = (pc & ~0x3) + disp;
			return;
		}
	} else if (op == 0xb) {	/* branch 16-bit relative */
		op2 = (insn >> 20) & 0xf;
		switch (op2) {
		case 0x0:	/* BEQ */
		case 0x1:	/* BNE */
		case 0x8:	/* BEQZ */
		case 0x9:	/* BNEZ */
		case 0xa:	/* BLTZ */
		case 0xb:	/* BGEZ */
		case 0xc:	/* BLEZ */
		case 0xd:	/* BGTZ */
			regno1 = ((insn >> 24) & 0xf);
			regno2 = ((insn >> 16) & 0xf);
			if (check_condition_src(op2, regno1, regno2, child)) {
				/* sign-extend 16-bit disp, scale by 4 */
				disp = (long)(insn << 16) >> 14;
				*next_pc = (pc & ~0x3) + disp;
				return;
			}
			break;
		}
	}
	/* No branch taken: sequential execution. */
	*next_pc = pc + 4;
}
  402. static inline void
  403. compute_next_pc(unsigned long insn, unsigned long pc,
  404. unsigned long *next_pc, struct task_struct *child)
  405. {
  406. if (insn & 0x80000000)
  407. compute_next_pc_for_32bit_insn(insn, pc, next_pc, child);
  408. else
  409. compute_next_pc_for_16bit_insn(insn, pc, next_pc, child);
  410. }
/*
 * Record the original instruction at @next_pc in the per-thread trap
 * table and build the replacement word (*code) that plants a TRAP1
 * breakpoint in the correct halfword slot.  Returns 0 on success,
 * -1 when the table is full.
 */
static int
register_debug_trap(struct task_struct *child, unsigned long next_pc,
		    unsigned long next_insn, unsigned long *code)
{
	struct debug_trap *p = &child->thread.debug_trap;
	unsigned long addr = next_pc & ~3;	/* word-align the target */

	if (p->nr_trap == MAX_TRAPS) {
		printk("kernel BUG at %s %d: p->nr_trap = %d\n",
		       __FILE__, __LINE__, p->nr_trap);
		return -1;
	}
	p->addr[p->nr_trap] = addr;
	p->insn[p->nr_trap] = next_insn;
	p->nr_trap++;
	if (next_pc & 3) {
		/* Target is the right slot: keep the left halfword and
		 * put TRAP1 (0x10f1) in the right slot. */
		*code = (next_insn & 0xffff0000) | 0x10f1;
		/* xxx --> TRAP1 */
	} else {
		if ((next_insn & 0x80000000) || (next_insn & 0x8000)) {
			/* 32-bit or parallel instruction: replace the
			 * whole word with TRAP1 || NOP. */
			*code = 0x10f17000;
			/* TRAP1 --> NOP */
		} else {
			/* Two sequential 16-bit insns: TRAP1 in the left
			 * slot, keep the right halfword. */
			*code = (next_insn & 0xffff) | 0x10f10000;
			/* TRAP1 --> xxx */
		}
	}
	return 0;
}
  439. static int
  440. unregister_debug_trap(struct task_struct *child, unsigned long addr,
  441. unsigned long *code)
  442. {
  443. struct debug_trap *p = &child->thread.debug_trap;
  444. int i;
  445. /* Search debug trap entry. */
  446. for (i = 0; i < p->nr_trap; i++) {
  447. if (p->addr[i] == addr)
  448. break;
  449. }
  450. if (i >= p->nr_trap) {
  451. /* The trap may be requested from debugger.
  452. * ptrace should do nothing in this case.
  453. */
  454. return 0;
  455. }
  456. /* Recover orignal instruction code. */
  457. *code = p->insn[i];
  458. /* Shift debug trap entries. */
  459. while (i < p->nr_trap - 1) {
  460. p->insn[i] = p->insn[i + 1];
  461. p->addr[i] = p->addr[i + 1];
  462. i++;
  463. }
  464. p->nr_trap--;
  465. return 1;
  466. }
  467. static void
  468. unregister_all_debug_traps(struct task_struct *child)
  469. {
  470. struct debug_trap *p = &child->thread.debug_trap;
  471. int i;
  472. for (i = 0; i < p->nr_trap; i++)
  473. access_process_vm(child, p->addr[i], &p->insn[i], sizeof(p->insn[i]), 1);
  474. p->nr_trap = 0;
  475. }
/*
 * Make instruction fetch see freshly written text.  M32700/OPSP parts
 * have a copy-back flush helper; on other chips the cache is switched
 * off, invalidated, polled until done, and switched back on via
 * stores to the chip's cache-control addresses (-1/-2), as the asm
 * comments indicate.
 */
static inline void
invalidate_cache(void)
{
#if defined(CONFIG_CHIP_M32700) || defined(CONFIG_CHIP_OPSP)
	_flush_cache_copyback_all();
#else	/* ! CONFIG_CHIP_M32700 */
	/* Invalidate cache */
	__asm__ __volatile__ (
		"ldi r0, #-1 \n\t"
		"ldi r1, #0 \n\t"
		"stb r1, @r0 ; cache off \n\t"
		"; \n\t"
		"ldi r0, #-2 \n\t"
		"ldi r1, #1 \n\t"
		"stb r1, @r0 ; cache invalidate \n\t"
		".fillinsn \n"
		"0: \n\t"
		"ldb r1, @r0 ; invalidate check \n\t"
		"bnez r1, 0b \n\t"
		"; \n\t"
		"ldi r0, #-1 \n\t"
		"ldi r1, #1 \n\t"
		"stb r1, @r0 ; cache on \n\t"
		: : : "r0", "r1", "memory"
	);
	/* FIXME: copying-back d-cache and invalidating i-cache are needed.
	 */
#endif /* CONFIG_CHIP_M32700 */
}
  505. /* Embed a debug trap (TRAP1) code */
  506. static int
  507. embed_debug_trap(struct task_struct *child, unsigned long next_pc)
  508. {
  509. unsigned long next_insn, code;
  510. unsigned long addr = next_pc & ~3;
  511. if (access_process_vm(child, addr, &next_insn, sizeof(next_insn), 0)
  512. != sizeof(next_insn)) {
  513. return -1; /* error */
  514. }
  515. /* Set a trap code. */
  516. if (register_debug_trap(child, next_pc, next_insn, &code)) {
  517. return -1; /* error */
  518. }
  519. if (access_process_vm(child, addr, &code, sizeof(code), 1)
  520. != sizeof(code)) {
  521. return -1; /* error */
  522. }
  523. return 0; /* success */
  524. }
  525. void
  526. withdraw_debug_trap(struct pt_regs *regs)
  527. {
  528. unsigned long addr;
  529. unsigned long code;
  530. addr = (regs->bpc - 2) & ~3;
  531. regs->bpc -= 2;
  532. if (unregister_debug_trap(current, addr, &code)) {
  533. access_process_vm(current, addr, &code, sizeof(code), 1);
  534. invalidate_cache();
  535. }
  536. }
  537. static void
  538. init_debug_traps(struct task_struct *child)
  539. {
  540. struct debug_trap *p = &child->thread.debug_trap;
  541. int i;
  542. p->nr_trap = 0;
  543. for (i = 0; i < MAX_TRAPS; i++) {
  544. p->addr[i] = 0;
  545. p->insn[i] = 0;
  546. }
  547. }
/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
	/* nothing to do.. */
	/* NOTE(review): single-step state here appears to live in the
	 * per-thread debug_trap table (cleaned up via
	 * unregister_all_debug_traps elsewhere), not in a hardware
	 * flag -- confirm no trap entries can leak across detach. */
}
  557. static int
  558. do_ptrace(long request, struct task_struct *child, long addr, long data)
  559. {
  560. unsigned long tmp;
  561. int ret;
  562. switch (request) {
  563. /*
  564. * read word at location "addr" in the child process.
  565. */
  566. case PTRACE_PEEKTEXT:
  567. case PTRACE_PEEKDATA:
  568. ret = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
  569. if (ret == sizeof(tmp))
  570. ret = put_user(tmp,(unsigned long __user *) data);
  571. else
  572. ret = -EIO;
  573. break;
  574. /*
  575. * read the word at location addr in the USER area.
  576. */
  577. case PTRACE_PEEKUSR:
  578. ret = ptrace_read_user(child, addr,
  579. (unsigned long __user *)data);
  580. break;
  581. /*
  582. * write the word at location addr.
  583. */
  584. case PTRACE_POKETEXT:
  585. case PTRACE_POKEDATA:
  586. ret = access_process_vm(child, addr, &data, sizeof(data), 1);
  587. if (ret == sizeof(data)) {
  588. ret = 0;
  589. if (request == PTRACE_POKETEXT) {
  590. invalidate_cache();
  591. }
  592. } else {
  593. ret = -EIO;
  594. }
  595. break;
  596. /*
  597. * write the word at location addr in the USER area.
  598. */
  599. case PTRACE_POKEUSR:
  600. ret = ptrace_write_user(child, addr, data);
  601. break;
  602. /*
  603. * continue/restart and stop at next (return from) syscall
  604. */
  605. case PTRACE_SYSCALL:
  606. case PTRACE_CONT:
  607. ret = -EIO;
  608. if (!valid_signal(data))
  609. break;
  610. if (request == PTRACE_SYSCALL)
  611. set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
  612. else
  613. clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
  614. child->exit_code = data;
  615. wake_up_process(child);
  616. ret = 0;
  617. break;
  618. /*
  619. * make the child exit. Best I can do is send it a sigkill.
  620. * perhaps it should be put in the status that it wants to
  621. * exit.
  622. */
  623. case PTRACE_KILL: {
  624. ret = 0;
  625. unregister_all_debug_traps(child);
  626. invalidate_cache();
  627. if (child->exit_state == EXIT_ZOMBIE) /* already dead */
  628. break;
  629. child->exit_code = SIGKILL;
  630. wake_up_process(child);
  631. break;
  632. }
  633. /*
  634. * execute single instruction.
  635. */
  636. case PTRACE_SINGLESTEP: {
  637. unsigned long next_pc;
  638. unsigned long pc, insn;
  639. ret = -EIO;
  640. if (!valid_signal(data))
  641. break;
  642. clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
  643. if ((child->ptrace & PT_DTRACE) == 0) {
  644. /* Spurious delayed TF traps may occur */
  645. child->ptrace |= PT_DTRACE;
  646. }
  647. /* Compute next pc. */
  648. pc = get_stack_long(child, PT_BPC);
  649. if (access_process_vm(child, pc&~3, &insn, sizeof(insn), 0)
  650. != sizeof(insn))
  651. break;
  652. compute_next_pc(insn, pc, &next_pc, child);
  653. if (next_pc & 0x80000000)
  654. break;
  655. if (embed_debug_trap(child, next_pc))
  656. break;
  657. invalidate_cache();
  658. child->exit_code = data;
  659. /* give it a chance to run. */
  660. wake_up_process(child);
  661. ret = 0;
  662. break;
  663. }
  664. /*
  665. * detach a process that was attached.
  666. */
  667. case PTRACE_DETACH:
  668. ret = 0;
  669. ret = ptrace_detach(child, data);
  670. break;
  671. case PTRACE_GETREGS:
  672. ret = ptrace_getregs(child, (void __user *)data);
  673. break;
  674. case PTRACE_SETREGS:
  675. ret = ptrace_setregs(child, (void __user *)data);
  676. break;
  677. default:
  678. ret = ptrace_request(child, request, addr, data);
  679. break;
  680. }
  681. return ret;
  682. }
  683. asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
  684. {
  685. struct task_struct *child;
  686. int ret;
  687. lock_kernel();
  688. ret = -EPERM;
  689. if (request == PTRACE_TRACEME) {
  690. /* are we already being traced? */
  691. if (current->ptrace & PT_PTRACED)
  692. goto out;
  693. /* set the ptrace bit in the process flags. */
  694. current->ptrace |= PT_PTRACED;
  695. ret = 0;
  696. goto out;
  697. }
  698. ret = -ESRCH;
  699. read_lock(&tasklist_lock);
  700. child = find_task_by_pid(pid);
  701. if (child)
  702. get_task_struct(child);
  703. read_unlock(&tasklist_lock);
  704. if (!child)
  705. goto out;
  706. ret = -EPERM;
  707. if (pid == 1) /* you may not mess with init */
  708. goto out;
  709. if (request == PTRACE_ATTACH) {
  710. ret = ptrace_attach(child);
  711. if (ret == 0)
  712. init_debug_traps(child);
  713. goto out_tsk;
  714. }
  715. ret = ptrace_check_attach(child, request == PTRACE_KILL);
  716. if (ret == 0)
  717. ret = do_ptrace(request, child, addr, data);
  718. out_tsk:
  719. put_task_struct(child);
  720. out:
  721. unlock_kernel();
  722. return ret;
  723. }
  724. /* notification of system call entry/exit
  725. * - triggered by current->work.syscall_trace
  726. */
  727. void do_syscall_trace(void)
  728. {
  729. if (!test_thread_flag(TIF_SYSCALL_TRACE))
  730. return;
  731. if (!(current->ptrace & PT_PTRACED))
  732. return;
  733. /* the 0x80 provides a way for the tracing parent to distinguish
  734. between a syscall stop and SIGTRAP delivery */
  735. ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
  736. ? 0x80 : 0));
  737. /*
  738. * this isn't the same as continuing with a signal, but it will do
  739. * for normal use. strace only continues with a signal if the
  740. * stopping signal is not SIGTRAP. -brl
  741. */
  742. if (current->exit_code) {
  743. send_sig(current->exit_code, current, 1);
  744. current->exit_code = 0;
  745. }
  746. }