ptrace_32.c — 12 KB
  1. /*
  2. * SuperH process tracing
  3. *
  4. * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
  5. * Copyright (C) 2002 - 2009 Paul Mundt
  6. *
  7. * Audit support by Yuichi Nakamura <ynakam@hitachisoft.jp>
  8. *
  9. * This file is subject to the terms and conditions of the GNU General Public
  10. * License. See the file "COPYING" in the main directory of this archive
  11. * for more details.
  12. */
  13. #include <linux/kernel.h>
  14. #include <linux/sched.h>
  15. #include <linux/mm.h>
  16. #include <linux/smp.h>
  17. #include <linux/errno.h>
  18. #include <linux/ptrace.h>
  19. #include <linux/user.h>
  20. #include <linux/security.h>
  21. #include <linux/signal.h>
  22. #include <linux/io.h>
  23. #include <linux/audit.h>
  24. #include <linux/seccomp.h>
  25. #include <linux/tracehook.h>
  26. #include <linux/elf.h>
  27. #include <linux/regset.h>
  28. #include <linux/hw_breakpoint.h>
  29. #include <asm/uaccess.h>
  30. #include <asm/pgtable.h>
  31. #include <asm/system.h>
  32. #include <asm/processor.h>
  33. #include <asm/mmu_context.h>
  34. #include <asm/syscalls.h>
  35. #include <asm/fpu.h>
  36. #define CREATE_TRACE_POINTS
  37. #include <trace/events/syscalls.h>
  38. /*
  39. * This routine will get a word off of the process kernel stack.
  40. */
  41. static inline int get_stack_long(struct task_struct *task, int offset)
  42. {
  43. unsigned char *stack;
  44. stack = (unsigned char *)task_pt_regs(task);
  45. stack += offset;
  46. return (*((int *)stack));
  47. }
  48. /*
  49. * This routine will put a word on the process kernel stack.
  50. */
  51. static inline int put_stack_long(struct task_struct *task, int offset,
  52. unsigned long data)
  53. {
  54. unsigned char *stack;
  55. stack = (unsigned char *)task_pt_regs(task);
  56. stack += offset;
  57. *(unsigned long *) stack = data;
  58. return 0;
  59. }
  60. void ptrace_triggered(struct perf_event *bp, int nmi,
  61. struct perf_sample_data *data, struct pt_regs *regs)
  62. {
  63. struct perf_event_attr attr;
  64. /*
  65. * Disable the breakpoint request here since ptrace has defined a
  66. * one-shot behaviour for breakpoint exceptions.
  67. */
  68. attr = bp->attr;
  69. attr.disabled = true;
  70. modify_user_hw_breakpoint(bp, &attr);
  71. }
  72. static int set_single_step(struct task_struct *tsk, unsigned long addr)
  73. {
  74. struct thread_struct *thread = &tsk->thread;
  75. struct perf_event *bp;
  76. struct perf_event_attr attr;
  77. bp = thread->ptrace_bps[0];
  78. if (!bp) {
  79. ptrace_breakpoint_init(&attr);
  80. attr.bp_addr = addr;
  81. attr.bp_len = HW_BREAKPOINT_LEN_2;
  82. attr.bp_type = HW_BREAKPOINT_R;
  83. bp = register_user_hw_breakpoint(&attr, ptrace_triggered, tsk);
  84. if (IS_ERR(bp))
  85. return PTR_ERR(bp);
  86. thread->ptrace_bps[0] = bp;
  87. } else {
  88. int err;
  89. attr = bp->attr;
  90. attr.bp_addr = addr;
  91. err = modify_user_hw_breakpoint(bp, &attr);
  92. if (unlikely(err))
  93. return err;
  94. }
  95. return 0;
  96. }
  97. void user_enable_single_step(struct task_struct *child)
  98. {
  99. unsigned long pc = get_stack_long(child, offsetof(struct pt_regs, pc));
  100. set_tsk_thread_flag(child, TIF_SINGLESTEP);
  101. set_single_step(child, pc);
  102. }
  103. void user_disable_single_step(struct task_struct *child)
  104. {
  105. clear_tsk_thread_flag(child, TIF_SINGLESTEP);
  106. }
  107. /*
  108. * Called by kernel/ptrace.c when detaching..
  109. *
  110. * Make sure single step bits etc are not set.
  111. */
  112. void ptrace_disable(struct task_struct *child)
  113. {
  114. user_disable_single_step(child);
  115. }
  116. static int genregs_get(struct task_struct *target,
  117. const struct user_regset *regset,
  118. unsigned int pos, unsigned int count,
  119. void *kbuf, void __user *ubuf)
  120. {
  121. const struct pt_regs *regs = task_pt_regs(target);
  122. int ret;
  123. ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
  124. regs->regs,
  125. 0, 16 * sizeof(unsigned long));
  126. if (!ret)
  127. /* PC, PR, SR, GBR, MACH, MACL, TRA */
  128. ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
  129. &regs->pc,
  130. offsetof(struct pt_regs, pc),
  131. sizeof(struct pt_regs));
  132. if (!ret)
  133. ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
  134. sizeof(struct pt_regs), -1);
  135. return ret;
  136. }
  137. static int genregs_set(struct task_struct *target,
  138. const struct user_regset *regset,
  139. unsigned int pos, unsigned int count,
  140. const void *kbuf, const void __user *ubuf)
  141. {
  142. struct pt_regs *regs = task_pt_regs(target);
  143. int ret;
  144. ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
  145. regs->regs,
  146. 0, 16 * sizeof(unsigned long));
  147. if (!ret && count > 0)
  148. ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
  149. &regs->pc,
  150. offsetof(struct pt_regs, pc),
  151. sizeof(struct pt_regs));
  152. if (!ret)
  153. ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
  154. sizeof(struct pt_regs), -1);
  155. return ret;
  156. }
  157. #ifdef CONFIG_SH_FPU
  158. int fpregs_get(struct task_struct *target,
  159. const struct user_regset *regset,
  160. unsigned int pos, unsigned int count,
  161. void *kbuf, void __user *ubuf)
  162. {
  163. int ret;
  164. ret = init_fpu(target);
  165. if (ret)
  166. return ret;
  167. if ((boot_cpu_data.flags & CPU_HAS_FPU))
  168. return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
  169. &target->thread.xstate->hardfpu, 0, -1);
  170. return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
  171. &target->thread.xstate->softfpu, 0, -1);
  172. }
  173. static int fpregs_set(struct task_struct *target,
  174. const struct user_regset *regset,
  175. unsigned int pos, unsigned int count,
  176. const void *kbuf, const void __user *ubuf)
  177. {
  178. int ret;
  179. ret = init_fpu(target);
  180. if (ret)
  181. return ret;
  182. set_stopped_child_used_math(target);
  183. if ((boot_cpu_data.flags & CPU_HAS_FPU))
  184. return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
  185. &target->thread.xstate->hardfpu, 0, -1);
  186. return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
  187. &target->thread.xstate->softfpu, 0, -1);
  188. }
  189. static int fpregs_active(struct task_struct *target,
  190. const struct user_regset *regset)
  191. {
  192. return tsk_used_math(target) ? regset->n : 0;
  193. }
  194. #endif
  195. #ifdef CONFIG_SH_DSP
  196. static int dspregs_get(struct task_struct *target,
  197. const struct user_regset *regset,
  198. unsigned int pos, unsigned int count,
  199. void *kbuf, void __user *ubuf)
  200. {
  201. const struct pt_dspregs *regs =
  202. (struct pt_dspregs *)&target->thread.dsp_status.dsp_regs;
  203. int ret;
  204. ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, regs,
  205. 0, sizeof(struct pt_dspregs));
  206. if (!ret)
  207. ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
  208. sizeof(struct pt_dspregs), -1);
  209. return ret;
  210. }
  211. static int dspregs_set(struct task_struct *target,
  212. const struct user_regset *regset,
  213. unsigned int pos, unsigned int count,
  214. const void *kbuf, const void __user *ubuf)
  215. {
  216. struct pt_dspregs *regs =
  217. (struct pt_dspregs *)&target->thread.dsp_status.dsp_regs;
  218. int ret;
  219. ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs,
  220. 0, sizeof(struct pt_dspregs));
  221. if (!ret)
  222. ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
  223. sizeof(struct pt_dspregs), -1);
  224. return ret;
  225. }
  226. static int dspregs_active(struct task_struct *target,
  227. const struct user_regset *regset)
  228. {
  229. struct pt_regs *regs = task_pt_regs(target);
  230. return regs->sr & SR_DSP ? regset->n : 0;
  231. }
  232. #endif
/*
 * These are our native regset flavours.
 */
enum sh_regset {
	REGSET_GENERAL,		/* R0-R15 + PC, PR, SR, GBR, MACH, MACL, TRA */
#ifdef CONFIG_SH_FPU
	REGSET_FPU,		/* hard- or soft-FPU state (see fpregs_get) */
#endif
#ifdef CONFIG_SH_DSP
	REGSET_DSP,		/* DSP register file, when SR.DSP is set */
#endif
};
/* Native regset table, indexed by enum sh_regset. */
static const struct user_regset sh_regsets[] = {
	/*
	 * Format is:
	 * R0 --> R15
	 * PC, PR, SR, GBR, MACH, MACL, TRA
	 */
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = ELF_NGREG,
		.size = sizeof(long),
		.align = sizeof(long),
		.get = genregs_get,
		.set = genregs_set,
	},

#ifdef CONFIG_SH_FPU
	[REGSET_FPU] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_fpu_struct) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.get = fpregs_get,
		.set = fpregs_set,
		.active = fpregs_active,
	},
#endif

#ifdef CONFIG_SH_DSP
	/* No .core_note_type: the DSP state is not dumped in core files. */
	[REGSET_DSP] = {
		.n = sizeof(struct pt_dspregs) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.get = dspregs_get,
		.set = dspregs_set,
		.active = dspregs_active,
	},
#endif
};
/* The single regset view exposed for 32-bit SH tasks. */
static const struct user_regset_view user_sh_native_view = {
	.name = "sh",
	.e_machine = EM_SH,
	.regsets = sh_regsets,
	.n = ARRAY_SIZE(sh_regsets),
};
  287. const struct user_regset_view *task_user_regset_view(struct task_struct *task)
  288. {
  289. return &user_sh_native_view;
  290. }
/*
 * Arch-specific ptrace request dispatcher.  Handles USER-area peeks and
 * pokes plus the bulk regset transfers; everything else falls through
 * to the generic ptrace_request().
 */
long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
	/*
	 * Never dereferenced: used only so &dummy->member yields the
	 * byte offset of that member within struct user.
	 */
	struct user * dummy = NULL;
	unsigned long __user *datap = (unsigned long __user *)data;
	int ret;

	switch (request) {
	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long tmp;

		ret = -EIO;
		/* Reject misaligned or out-of-range USER-area offsets. */
		if ((addr & 3) || addr < 0 ||
		    addr > sizeof(struct user) - 3)
			break;

		if (addr < sizeof(struct pt_regs))
			/* General registers live at the start of struct user. */
			tmp = get_stack_long(child, addr);
		else if (addr >= (long) &dummy->fpu &&
			 addr < (long) &dummy->u_fpvalid) {
			if (!tsk_used_math(child)) {
				/* FPU untouched: synthesize pristine state. */
				if (addr == (long)&dummy->fpu.fpscr)
					tmp = FPSCR_INIT;
				else
					tmp = 0;
			} else
				tmp = ((long *)child->thread.xstate)
					[(addr - (long)&dummy->fpu) >> 2];
		} else if (addr == (long) &dummy->u_fpvalid)
			tmp = !!tsk_used_math(child);
		else if (addr == PT_TEXT_ADDR)
			tmp = child->mm->start_code;
		else if (addr == PT_DATA_ADDR)
			tmp = child->mm->start_data;
		else if (addr == PT_TEXT_END_ADDR)
			tmp = child->mm->end_code;
		else if (addr == PT_TEXT_LEN)
			tmp = child->mm->end_code - child->mm->start_code;
		else
			tmp = 0;
		ret = put_user(tmp, datap);
		break;
	}

	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
		ret = -EIO;
		if ((addr & 3) || addr < 0 ||
		    addr > sizeof(struct user) - 3)
			break;

		if (addr < sizeof(struct pt_regs))
			ret = put_stack_long(child, addr, data);
		else if (addr >= (long) &dummy->fpu &&
			 addr < (long) &dummy->u_fpvalid) {
			set_stopped_child_used_math(child);
			((long *)child->thread.xstate)
				[(addr - (long)&dummy->fpu) >> 2] = data;
			ret = 0;
		} else if (addr == (long) &dummy->u_fpvalid) {
			conditional_stopped_child_used_math(data, child);
			ret = 0;
		}
		/* Other offsets (text/data bounds) are read-only: -EIO. */
		break;

	case PTRACE_GETREGS:
		return copy_regset_to_user(child, &user_sh_native_view,
					   REGSET_GENERAL,
					   0, sizeof(struct pt_regs),
					   (void __user *)data);
	case PTRACE_SETREGS:
		return copy_regset_from_user(child, &user_sh_native_view,
					     REGSET_GENERAL,
					     0, sizeof(struct pt_regs),
					     (const void __user *)data);
#ifdef CONFIG_SH_FPU
	case PTRACE_GETFPREGS:
		return copy_regset_to_user(child, &user_sh_native_view,
					   REGSET_FPU,
					   0, sizeof(struct user_fpu_struct),
					   (void __user *)data);
	case PTRACE_SETFPREGS:
		return copy_regset_from_user(child, &user_sh_native_view,
					     REGSET_FPU,
					     0, sizeof(struct user_fpu_struct),
					     (const void __user *)data);
#endif
#ifdef CONFIG_SH_DSP
	case PTRACE_GETDSPREGS:
		return copy_regset_to_user(child, &user_sh_native_view,
					   REGSET_DSP,
					   0, sizeof(struct pt_dspregs),
					   (void __user *)data);
	case PTRACE_SETDSPREGS:
		return copy_regset_from_user(child, &user_sh_native_view,
					     REGSET_DSP,
					     0, sizeof(struct pt_dspregs),
					     (const void __user *)data);
#endif
	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
  389. static inline int audit_arch(void)
  390. {
  391. int arch = EM_SH;
  392. #ifdef CONFIG_CPU_LITTLE_ENDIAN
  393. arch |= __AUDIT_ARCH_LE;
  394. #endif
  395. return arch;
  396. }
/*
 * Syscall-entry hook: seccomp check, ptrace notification, tracepoint,
 * audit -- in that order.  Returns the syscall number to execute, or
 * -1 to suppress the call (the original number stays in regs->regs[0]).
 */
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
	long ret = 0;

	secure_computing(regs->regs[0]);

	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    tracehook_report_syscall_entry(regs))
		/*
		 * Tracing decided this syscall should not happen.
		 * We'll return a bogus call number to get an ENOSYS
		 * error, but leave the original number in regs->regs[0].
		 */
		ret = -1L;

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->regs[0]);

	/*
	 * NOTE(review): audit takes the number from r3 while seccomp and
	 * the tracepoint use r0 -- presumably the SH ABI keeps it in both
	 * at this point; confirm against the entry assembly.
	 */
	if (unlikely(current->audit_context))
		audit_syscall_entry(audit_arch(), regs->regs[3],
				    regs->regs[4], regs->regs[5],
				    regs->regs[6], regs->regs[7]);

	return ret ?: regs->regs[0];
}
  417. asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
  418. {
  419. int step;
  420. if (unlikely(current->audit_context))
  421. audit_syscall_exit(AUDITSC_RESULT(regs->regs[0]),
  422. regs->regs[0]);
  423. if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
  424. trace_sys_exit(regs, regs->regs[0]);
  425. step = test_thread_flag(TIF_SINGLESTEP);
  426. if (step || test_thread_flag(TIF_SYSCALL_TRACE))
  427. tracehook_report_syscall_exit(regs, step);
  428. }