  1. /*
  2. * linux/arch/m32r/kernel/ptrace.c
  3. *
  4. * Copyright (C) 2002 Hirokazu Takata, Takeo Takahashi
  5. * Copyright (C) 2004 Hirokazu Takata, Kei Sakamoto
  6. *
  7. * Original x86 implementation:
  8. * By Ross Biro 1/23/92
  9. * edited by Linus Torvalds
  10. *
  11. * Some code taken from sh version:
  12. * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
  13. * Some code taken from arm version:
  14. * Copyright (C) 2000 Russell King
  15. */
  16. #include <linux/config.h>
  17. #include <linux/kernel.h>
  18. #include <linux/sched.h>
  19. #include <linux/mm.h>
  20. #include <linux/smp.h>
  21. #include <linux/smp_lock.h>
  22. #include <linux/errno.h>
  23. #include <linux/ptrace.h>
  24. #include <linux/user.h>
  25. #include <linux/string.h>
  26. #include <asm/cacheflush.h>
  27. #include <asm/io.h>
  28. #include <asm/uaccess.h>
  29. #include <asm/pgtable.h>
  30. #include <asm/system.h>
  31. #include <asm/processor.h>
  32. #include <asm/mmu_context.h>
  33. /*
  34. * Get the address of the live pt_regs for the specified task.
  35. * These are saved onto the top kernel stack when the process
  36. * is not running.
  37. *
  38. * Note: if a user thread is execve'd from kernel space, the
  39. * kernel stack will not be empty on entry to the kernel, so
  40. * ptracing these tasks will fail.
  41. */
  42. static inline struct pt_regs *
  43. get_user_regs(struct task_struct *task)
  44. {
  45. return (struct pt_regs *)
  46. ((unsigned long)task->thread_info + THREAD_SIZE
  47. - sizeof(struct pt_regs));
  48. }
  49. /*
  50. * This routine will get a word off of the process kernel stack.
  51. */
  52. static inline unsigned long int
  53. get_stack_long(struct task_struct *task, int offset)
  54. {
  55. unsigned long *stack;
  56. stack = (unsigned long *)get_user_regs(task);
  57. return stack[offset];
  58. }
  59. /*
  60. * This routine will put a word on the process kernel stack.
  61. */
  62. static inline int
  63. put_stack_long(struct task_struct *task, int offset, unsigned long data)
  64. {
  65. unsigned long *stack;
  66. stack = (unsigned long *)get_user_regs(task);
  67. stack[offset] = data;
  68. return 0;
  69. }
/* Register-frame indices for the general registers, indexed by the
 * architectural register number (r0..r12, fp, lr, spu).  Consumed by
 * get_stack_long()/put_stack_long() when decoding branch operands. */
static int reg_offset[] = {
	PT_R0, PT_R1, PT_R2, PT_R3, PT_R4, PT_R5, PT_R6, PT_R7,
	PT_R8, PT_R9, PT_R10, PT_R11, PT_R12, PT_FP, PT_LR, PT_SPU,
};
  74. /*
  75. * Read the word at offset "off" into the "struct user". We
  76. * actually access the pt_regs stored on the kernel stack.
  77. */
  78. static int ptrace_read_user(struct task_struct *tsk, unsigned long off,
  79. unsigned long __user *data)
  80. {
  81. unsigned long tmp;
  82. #ifndef NO_FPU
  83. struct user * dummy = NULL;
  84. #endif
  85. if ((off & 3) || (off < 0) || (off > sizeof(struct user) - 3))
  86. return -EIO;
  87. off >>= 2;
  88. switch (off) {
  89. case PT_EVB:
  90. __asm__ __volatile__ (
  91. "mvfc %0, cr5 \n\t"
  92. : "=r" (tmp)
  93. );
  94. break;
  95. case PT_CBR: {
  96. unsigned long psw;
  97. psw = get_stack_long(tsk, PT_PSW);
  98. tmp = ((psw >> 8) & 1);
  99. }
  100. break;
  101. case PT_PSW: {
  102. unsigned long psw, bbpsw;
  103. psw = get_stack_long(tsk, PT_PSW);
  104. bbpsw = get_stack_long(tsk, PT_BBPSW);
  105. tmp = ((psw >> 8) & 0xff) | ((bbpsw & 0xff) << 8);
  106. }
  107. break;
  108. case PT_PC:
  109. tmp = get_stack_long(tsk, PT_BPC);
  110. break;
  111. case PT_BPC:
  112. off = PT_BBPC;
  113. /* fall through */
  114. default:
  115. if (off < (sizeof(struct pt_regs) >> 2))
  116. tmp = get_stack_long(tsk, off);
  117. #ifndef NO_FPU
  118. else if (off >= (long)(&dummy->fpu >> 2) &&
  119. off < (long)(&dummy->u_fpvalid >> 2)) {
  120. if (!tsk_used_math(tsk)) {
  121. if (off == (long)(&dummy->fpu.fpscr >> 2))
  122. tmp = FPSCR_INIT;
  123. else
  124. tmp = 0;
  125. } else
  126. tmp = ((long *)(&tsk->thread.fpu >> 2))
  127. [off - (long)&dummy->fpu];
  128. } else if (off == (long)(&dummy->u_fpvalid >> 2))
  129. tmp = !!tsk_used_math(tsk);
  130. #endif /* not NO_FPU */
  131. else
  132. tmp = 0;
  133. }
  134. return put_user(tmp, data);
  135. }
  136. static int ptrace_write_user(struct task_struct *tsk, unsigned long off,
  137. unsigned long data)
  138. {
  139. int ret = -EIO;
  140. #ifndef NO_FPU
  141. struct user * dummy = NULL;
  142. #endif
  143. if ((off & 3) || off < 0 ||
  144. off > sizeof(struct user) - 3)
  145. return -EIO;
  146. off >>= 2;
  147. switch (off) {
  148. case PT_EVB:
  149. case PT_BPC:
  150. case PT_SPI:
  151. /* We don't allow to modify evb. */
  152. ret = 0;
  153. break;
  154. case PT_PSW:
  155. case PT_CBR: {
  156. /* We allow to modify only cbr in psw */
  157. unsigned long psw;
  158. psw = get_stack_long(tsk, PT_PSW);
  159. psw = (psw & ~0x100) | ((data & 1) << 8);
  160. ret = put_stack_long(tsk, PT_PSW, psw);
  161. }
  162. break;
  163. case PT_PC:
  164. off = PT_BPC;
  165. data &= ~1;
  166. /* fall through */
  167. default:
  168. if (off < (sizeof(struct pt_regs) >> 2))
  169. ret = put_stack_long(tsk, off, data);
  170. #ifndef NO_FPU
  171. else if (off >= (long)(&dummy->fpu >> 2) &&
  172. off < (long)(&dummy->u_fpvalid >> 2)) {
  173. set_stopped_child_used_math(tsk);
  174. ((long *)&tsk->thread.fpu)
  175. [off - (long)&dummy->fpu] = data;
  176. ret = 0;
  177. } else if (off == (long)(&dummy->u_fpvalid >> 2)) {
  178. conditional_stopped_child_used_math(data, tsk);
  179. ret = 0;
  180. }
  181. #endif /* not NO_FPU */
  182. break;
  183. }
  184. return ret;
  185. }
  186. /*
  187. * Get all user integer registers.
  188. */
  189. static int ptrace_getregs(struct task_struct *tsk, void __user *uregs)
  190. {
  191. struct pt_regs *regs = get_user_regs(tsk);
  192. return copy_to_user(uregs, regs, sizeof(struct pt_regs)) ? -EFAULT : 0;
  193. }
  194. /*
  195. * Set all user integer registers.
  196. */
  197. static int ptrace_setregs(struct task_struct *tsk, void __user *uregs)
  198. {
  199. struct pt_regs newregs;
  200. int ret;
  201. ret = -EFAULT;
  202. if (copy_from_user(&newregs, uregs, sizeof(struct pt_regs)) == 0) {
  203. struct pt_regs *regs = get_user_regs(tsk);
  204. *regs = newregs;
  205. ret = 0;
  206. }
  207. return ret;
  208. }
  209. static inline int
  210. check_condition_bit(struct task_struct *child)
  211. {
  212. return (int)((get_stack_long(child, PT_PSW) >> 8) & 1);
  213. }
  214. static int
  215. check_condition_src(unsigned long op, unsigned long regno1,
  216. unsigned long regno2, struct task_struct *child)
  217. {
  218. unsigned long reg1, reg2;
  219. reg2 = get_stack_long(child, reg_offset[regno2]);
  220. switch (op) {
  221. case 0x0: /* BEQ */
  222. reg1 = get_stack_long(child, reg_offset[regno1]);
  223. return reg1 == reg2;
  224. case 0x1: /* BNE */
  225. reg1 = get_stack_long(child, reg_offset[regno1]);
  226. return reg1 != reg2;
  227. case 0x8: /* BEQZ */
  228. return reg2 == 0;
  229. case 0x9: /* BNEZ */
  230. return reg2 != 0;
  231. case 0xa: /* BLTZ */
  232. return (int)reg2 < 0;
  233. case 0xb: /* BGEZ */
  234. return (int)reg2 >= 0;
  235. case 0xc: /* BLEZ */
  236. return (int)reg2 <= 0;
  237. case 0xd: /* BGTZ */
  238. return (int)reg2 > 0;
  239. default:
  240. /* never reached */
  241. return 0;
  242. }
  243. }
/*
 * Compute the address of the instruction that will execute after the
 * 16-bit instruction at "pc" in the traced child.  "insn" is the full
 * 32-bit word containing it.  Branch targets come from the decoded
 * displacement or the child's saved registers; anything else falls
 * through sequentially.
 */
static void
compute_next_pc_for_16bit_insn(unsigned long insn, unsigned long pc,
			       unsigned long *next_pc,
			       struct task_struct *child)
{
	unsigned long op, op2, op3;
	unsigned long disp;
	unsigned long regno;
	int parallel = 0;

	/* Bit 15 of the word marks a parallel (||) pair, which
	 * occupies the full 4 bytes. */
	if (insn & 0x00008000)
		parallel = 1;
	/* Select the halfword slot the pc actually points at. */
	if (pc & 3)
		insn &= 0x7fff;	/* right slot */
	else
		insn >>= 16;	/* left slot */
	op = (insn >> 12) & 0xf;
	op2 = (insn >> 8) & 0xf;
	op3 = (insn >> 4) & 0xf;
	if (op == 0x7) {
		/* short pc-relative branches */
		switch (op2) {
		case 0xd:	/* BNC */
		case 0x9:	/* BNCL */
			if (!check_condition_bit(child)) {
				/* sign-extend bits 7:0, scale by 4 */
				disp = (long)(insn << 24) >> 22;
				*next_pc = (pc & ~0x3) + disp;
				return;
			}
			break;
		case 0x8:	/* BCL */
		case 0xc:	/* BC */
			if (check_condition_bit(child)) {
				disp = (long)(insn << 24) >> 22;
				*next_pc = (pc & ~0x3) + disp;
				return;
			}
			break;
		case 0xe:	/* BL */
		case 0xf:	/* BRA */
			/* unconditional: always taken */
			disp = (long)(insn << 24) >> 22;
			*next_pc = (pc & ~0x3) + disp;
			return;
			break;
		}
	} else if (op == 0x1) {
		switch (op2) {
		case 0x0:
			if (op3 == 0xf) {	/* TRAP */
#if 1
				/* pass through */
#else
				/* kernel space is not allowed as next_pc */
				unsigned long evb;
				unsigned long trapno;
				trapno = insn & 0xf;
				__asm__ __volatile__ (
					"mvfc %0, cr5\n"
					:"=r"(evb)
					:
				);
				*next_pc = evb + (trapno << 2);
				return;
#endif
			} else if (op3 == 0xd) {	/* RTE */
				/* RTE resumes at the saved BPC */
				*next_pc = get_stack_long(child, PT_BPC);
				return;
			}
			break;
		case 0xc:	/* JC */
			if (op3 == 0xc && check_condition_bit(child)) {
				regno = insn & 0xf;
				*next_pc = get_stack_long(child,
							  reg_offset[regno]);
				return;
			}
			break;
		case 0xd:	/* JNC */
			if (op3 == 0xc && !check_condition_bit(child)) {
				regno = insn & 0xf;
				*next_pc = get_stack_long(child,
							  reg_offset[regno]);
				return;
			}
			break;
		case 0xe:	/* JL */
		case 0xf:	/* JMP */
			if (op3 == 0xc) {	/* JMP */
				regno = insn & 0xf;
				*next_pc = get_stack_long(child,
							  reg_offset[regno]);
				return;
			}
			break;
		}
	}
	/* Not a taken branch: step over 4 bytes for a parallel pair,
	 * otherwise 2 bytes for a single short instruction. */
	if (parallel)
		*next_pc = pc + 4;
	else
		*next_pc = pc + 2;
}
/*
 * Compute the address of the instruction that will execute after the
 * 32-bit instruction "insn" at "pc" in the traced child.
 */
static void
compute_next_pc_for_32bit_insn(unsigned long insn, unsigned long pc,
			       unsigned long *next_pc,
			       struct task_struct *child)
{
	unsigned long op;
	unsigned long op2;
	unsigned long disp;
	unsigned long regno1, regno2;

	op = (insn >> 28) & 0xf;
	if (op == 0xf) {	/* branch 24-bit relative */
		op2 = (insn >> 24) & 0xf;
		switch (op2) {
		case 0xd:	/* BNC */
		case 0x9:	/* BNCL */
			if (!check_condition_bit(child)) {
				/* sign-extend bits 23:0, scale by 4 */
				disp = (long)(insn << 8) >> 6;
				*next_pc = (pc & ~0x3) + disp;
				return;
			}
			break;
		case 0x8:	/* BCL */
		case 0xc:	/* BC */
			if (check_condition_bit(child)) {
				disp = (long)(insn << 8) >> 6;
				*next_pc = (pc & ~0x3) + disp;
				return;
			}
			break;
		case 0xe:	/* BL */
		case 0xf:	/* BRA */
			/* unconditional: always taken */
			disp = (long)(insn << 8) >> 6;
			*next_pc = (pc & ~0x3) + disp;
			return;
		}
	} else if (op == 0xb) {	/* branch 16-bit relative */
		op2 = (insn >> 20) & 0xf;
		switch (op2) {
		case 0x0:	/* BEQ */
		case 0x1:	/* BNE */
		case 0x8:	/* BEQZ */
		case 0x9:	/* BNEZ */
		case 0xa:	/* BLTZ */
		case 0xb:	/* BGEZ */
		case 0xc:	/* BLEZ */
		case 0xd:	/* BGTZ */
			regno1 = ((insn >> 24) & 0xf);
			regno2 = ((insn >> 16) & 0xf);
			if (check_condition_src(op2, regno1, regno2, child)) {
				/* sign-extend bits 15:0, scale by 4 */
				disp = (long)(insn << 16) >> 14;
				*next_pc = (pc & ~0x3) + disp;
				return;
			}
			break;
		}
	}
	/* Not a taken branch: fall through to the next word. */
	*next_pc = pc + 4;
}
  401. static inline void
  402. compute_next_pc(unsigned long insn, unsigned long pc,
  403. unsigned long *next_pc, struct task_struct *child)
  404. {
  405. if (insn & 0x80000000)
  406. compute_next_pc_for_32bit_insn(insn, pc, next_pc, child);
  407. else
  408. compute_next_pc_for_16bit_insn(insn, pc, next_pc, child);
  409. }
  410. static int
  411. register_debug_trap(struct task_struct *child, unsigned long next_pc,
  412. unsigned long next_insn, unsigned long *code)
  413. {
  414. struct debug_trap *p = &child->thread.debug_trap;
  415. unsigned long addr = next_pc & ~3;
  416. if (p->nr_trap == MAX_TRAPS) {
  417. printk("kernel BUG at %s %d: p->nr_trap = %d\n",
  418. __FILE__, __LINE__, p->nr_trap);
  419. return -1;
  420. }
  421. p->addr[p->nr_trap] = addr;
  422. p->insn[p->nr_trap] = next_insn;
  423. p->nr_trap++;
  424. if (next_pc & 3) {
  425. *code = (next_insn & 0xffff0000) | 0x10f1;
  426. /* xxx --> TRAP1 */
  427. } else {
  428. if ((next_insn & 0x80000000) || (next_insn & 0x8000)) {
  429. *code = 0x10f17000;
  430. /* TRAP1 --> NOP */
  431. } else {
  432. *code = (next_insn & 0xffff) | 0x10f10000;
  433. /* TRAP1 --> xxx */
  434. }
  435. }
  436. return 0;
  437. }
  438. static int
  439. unregister_debug_trap(struct task_struct *child, unsigned long addr,
  440. unsigned long *code)
  441. {
  442. struct debug_trap *p = &child->thread.debug_trap;
  443. int i;
  444. /* Search debug trap entry. */
  445. for (i = 0; i < p->nr_trap; i++) {
  446. if (p->addr[i] == addr)
  447. break;
  448. }
  449. if (i >= p->nr_trap) {
  450. /* The trap may be requested from debugger.
  451. * ptrace should do nothing in this case.
  452. */
  453. return 0;
  454. }
  455. /* Recover orignal instruction code. */
  456. *code = p->insn[i];
  457. /* Shift debug trap entries. */
  458. while (i < p->nr_trap - 1) {
  459. p->insn[i] = p->insn[i + 1];
  460. p->addr[i] = p->addr[i + 1];
  461. i++;
  462. }
  463. p->nr_trap--;
  464. return 1;
  465. }
  466. static void
  467. unregister_all_debug_traps(struct task_struct *child)
  468. {
  469. struct debug_trap *p = &child->thread.debug_trap;
  470. int i;
  471. for (i = 0; i < p->nr_trap; i++)
  472. access_process_vm(child, p->addr[i], &p->insn[i], sizeof(p->insn[i]), 1);
  473. p->nr_trap = 0;
  474. }
/*
 * Make instruction words patched via access_process_vm() visible to
 * the child's execution.  On M32700/OPSP parts this is a full
 * copy-back flush; otherwise the cache is toggled off, invalidated,
 * and re-enabled through the chip's memory-mapped control bytes.
 */
static inline void
invalidate_cache(void)
{
#if defined(CONFIG_CHIP_M32700) || defined(CONFIG_CHIP_OPSP)
	_flush_cache_copyback_all();
#else /* ! CONFIG_CHIP_M32700 */
	/* Invalidate cache */
	__asm__ __volatile__ (
		"ldi r0, #-1 \n\t"
		"ldi r1, #0 \n\t"
		"stb r1, @r0 ; cache off \n\t"
		"; \n\t"
		"ldi r0, #-2 \n\t"
		"ldi r1, #1 \n\t"
		"stb r1, @r0 ; cache invalidate \n\t"
		".fillinsn \n"
		"0: \n\t"
		"ldb r1, @r0 ; invalidate check \n\t"
		"bnez r1, 0b \n\t"
		"; \n\t"
		"ldi r0, #-1 \n\t"
		"ldi r1, #1 \n\t"
		"stb r1, @r0 ; cache on \n\t"
		: : : "r0", "r1", "memory"
	);
	/* FIXME: copying-back d-cache and invalidating i-cache are needed.
	 */
#endif /* CONFIG_CHIP_M32700 */
}
  504. /* Embed a debug trap (TRAP1) code */
  505. static int
  506. embed_debug_trap(struct task_struct *child, unsigned long next_pc)
  507. {
  508. unsigned long next_insn, code;
  509. unsigned long addr = next_pc & ~3;
  510. if (access_process_vm(child, addr, &next_insn, sizeof(next_insn), 0)
  511. != sizeof(next_insn)) {
  512. return -1; /* error */
  513. }
  514. /* Set a trap code. */
  515. if (register_debug_trap(child, next_pc, next_insn, &code)) {
  516. return -1; /* error */
  517. }
  518. if (access_process_vm(child, addr, &code, sizeof(code), 1)
  519. != sizeof(code)) {
  520. return -1; /* error */
  521. }
  522. return 0; /* success */
  523. }
  524. void
  525. withdraw_debug_trap(struct pt_regs *regs)
  526. {
  527. unsigned long addr;
  528. unsigned long code;
  529. addr = (regs->bpc - 2) & ~3;
  530. regs->bpc -= 2;
  531. if (unregister_debug_trap(current, addr, &code)) {
  532. access_process_vm(current, addr, &code, sizeof(code), 1);
  533. invalidate_cache();
  534. }
  535. }
  536. static void
  537. init_debug_traps(struct task_struct *child)
  538. {
  539. struct debug_trap *p = &child->thread.debug_trap;
  540. int i;
  541. p->nr_trap = 0;
  542. for (i = 0; i < MAX_TRAPS; i++) {
  543. p->addr[i] = 0;
  544. p->insn[i] = 0;
  545. }
  546. }
/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure single step bits etc are not set.
 *
 * Empty on m32r: single-stepping here is implemented by planting
 * TRAP1 words (see embed_debug_trap), not by a per-task hardware
 * flag, so there is no state to clear.
 */
void ptrace_disable(struct task_struct *child)
{
	/* nothing to do.. */
}
  556. static int
  557. do_ptrace(long request, struct task_struct *child, long addr, long data)
  558. {
  559. unsigned long tmp;
  560. int ret;
  561. switch (request) {
  562. /*
  563. * read word at location "addr" in the child process.
  564. */
  565. case PTRACE_PEEKTEXT:
  566. case PTRACE_PEEKDATA:
  567. ret = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
  568. if (ret == sizeof(tmp))
  569. ret = put_user(tmp,(unsigned long __user *) data);
  570. else
  571. ret = -EIO;
  572. break;
  573. /*
  574. * read the word at location addr in the USER area.
  575. */
  576. case PTRACE_PEEKUSR:
  577. ret = ptrace_read_user(child, addr,
  578. (unsigned long __user *)data);
  579. break;
  580. /*
  581. * write the word at location addr.
  582. */
  583. case PTRACE_POKETEXT:
  584. case PTRACE_POKEDATA:
  585. ret = access_process_vm(child, addr, &data, sizeof(data), 1);
  586. if (ret == sizeof(data)) {
  587. ret = 0;
  588. if (request == PTRACE_POKETEXT) {
  589. invalidate_cache();
  590. }
  591. } else {
  592. ret = -EIO;
  593. }
  594. break;
  595. /*
  596. * write the word at location addr in the USER area.
  597. */
  598. case PTRACE_POKEUSR:
  599. ret = ptrace_write_user(child, addr, data);
  600. break;
  601. /*
  602. * continue/restart and stop at next (return from) syscall
  603. */
  604. case PTRACE_SYSCALL:
  605. case PTRACE_CONT:
  606. ret = -EIO;
  607. if ((unsigned long) data > _NSIG)
  608. break;
  609. if (request == PTRACE_SYSCALL)
  610. set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
  611. else
  612. clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
  613. child->exit_code = data;
  614. wake_up_process(child);
  615. ret = 0;
  616. break;
  617. /*
  618. * make the child exit. Best I can do is send it a sigkill.
  619. * perhaps it should be put in the status that it wants to
  620. * exit.
  621. */
  622. case PTRACE_KILL: {
  623. ret = 0;
  624. unregister_all_debug_traps(child);
  625. invalidate_cache();
  626. if (child->exit_state == EXIT_ZOMBIE) /* already dead */
  627. break;
  628. child->exit_code = SIGKILL;
  629. wake_up_process(child);
  630. break;
  631. }
  632. /*
  633. * execute single instruction.
  634. */
  635. case PTRACE_SINGLESTEP: {
  636. unsigned long next_pc;
  637. unsigned long pc, insn;
  638. ret = -EIO;
  639. if ((unsigned long) data > _NSIG)
  640. break;
  641. clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
  642. if ((child->ptrace & PT_DTRACE) == 0) {
  643. /* Spurious delayed TF traps may occur */
  644. child->ptrace |= PT_DTRACE;
  645. }
  646. /* Compute next pc. */
  647. pc = get_stack_long(child, PT_BPC);
  648. if (access_process_vm(child, pc&~3, &insn, sizeof(insn), 0)
  649. != sizeof(insn))
  650. break;
  651. compute_next_pc(insn, pc, &next_pc, child);
  652. if (next_pc & 0x80000000)
  653. break;
  654. if (embed_debug_trap(child, next_pc))
  655. break;
  656. invalidate_cache();
  657. child->exit_code = data;
  658. /* give it a chance to run. */
  659. wake_up_process(child);
  660. ret = 0;
  661. break;
  662. }
  663. /*
  664. * detach a process that was attached.
  665. */
  666. case PTRACE_DETACH:
  667. ret = 0;
  668. ret = ptrace_detach(child, data);
  669. break;
  670. case PTRACE_GETREGS:
  671. ret = ptrace_getregs(child, (void __user *)data);
  672. break;
  673. case PTRACE_SETREGS:
  674. ret = ptrace_setregs(child, (void __user *)data);
  675. break;
  676. default:
  677. ret = ptrace_request(child, request, addr, data);
  678. break;
  679. }
  680. return ret;
  681. }
  682. asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
  683. {
  684. struct task_struct *child;
  685. int ret;
  686. lock_kernel();
  687. ret = -EPERM;
  688. if (request == PTRACE_TRACEME) {
  689. /* are we already being traced? */
  690. if (current->ptrace & PT_PTRACED)
  691. goto out;
  692. /* set the ptrace bit in the process flags. */
  693. current->ptrace |= PT_PTRACED;
  694. ret = 0;
  695. goto out;
  696. }
  697. ret = -ESRCH;
  698. read_lock(&tasklist_lock);
  699. child = find_task_by_pid(pid);
  700. if (child)
  701. get_task_struct(child);
  702. read_unlock(&tasklist_lock);
  703. if (!child)
  704. goto out;
  705. ret = -EPERM;
  706. if (pid == 1) /* you may not mess with init */
  707. goto out;
  708. if (request == PTRACE_ATTACH) {
  709. ret = ptrace_attach(child);
  710. if (ret == 0)
  711. init_debug_traps(child);
  712. goto out_tsk;
  713. }
  714. ret = ptrace_check_attach(child, request == PTRACE_KILL);
  715. if (ret == 0)
  716. ret = do_ptrace(request, child, addr, data);
  717. out_tsk:
  718. put_task_struct(child);
  719. out:
  720. unlock_kernel();
  721. return ret;
  722. }
  723. /* notification of system call entry/exit
  724. * - triggered by current->work.syscall_trace
  725. */
  726. void do_syscall_trace(void)
  727. {
  728. if (!test_thread_flag(TIF_SYSCALL_TRACE))
  729. return;
  730. if (!(current->ptrace & PT_PTRACED))
  731. return;
  732. /* the 0x80 provides a way for the tracing parent to distinguish
  733. between a syscall stop and SIGTRAP delivery */
  734. ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
  735. ? 0x80 : 0));
  736. /*
  737. * this isn't the same as continuing with a signal, but it will do
  738. * for normal use. strace only continues with a signal if the
  739. * stopping signal is not SIGTRAP. -brl
  740. */
  741. if (current->exit_code) {
  742. send_sig(current->exit_code, current, 1);
  743. current->exit_code = 0;
  744. }
  745. }