/* arch/sh/kernel/ptrace_32.c */
  1. /*
  2. * SuperH process tracing
  3. *
  4. * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
  5. * Copyright (C) 2002 - 2008 Paul Mundt
  6. *
  7. * Audit support by Yuichi Nakamura <ynakam@hitachisoft.jp>
  8. *
  9. * This file is subject to the terms and conditions of the GNU General Public
  10. * License. See the file "COPYING" in the main directory of this archive
  11. * for more details.
  12. */
  13. #include <linux/kernel.h>
  14. #include <linux/sched.h>
  15. #include <linux/mm.h>
  16. #include <linux/smp.h>
  17. #include <linux/errno.h>
  18. #include <linux/ptrace.h>
  19. #include <linux/user.h>
  20. #include <linux/slab.h>
  21. #include <linux/security.h>
  22. #include <linux/signal.h>
  23. #include <linux/io.h>
  24. #include <linux/audit.h>
  25. #include <linux/seccomp.h>
  26. #include <linux/tracehook.h>
  27. #include <linux/elf.h>
  28. #include <linux/regset.h>
  29. #include <asm/uaccess.h>
  30. #include <asm/pgtable.h>
  31. #include <asm/system.h>
  32. #include <asm/processor.h>
  33. #include <asm/mmu_context.h>
  34. #include <asm/syscalls.h>
  35. #include <asm/fpu.h>
  36. #include <trace/syscall.h>
  37. /*
  38. * This routine will get a word off of the process kernel stack.
  39. */
  40. static inline int get_stack_long(struct task_struct *task, int offset)
  41. {
  42. unsigned char *stack;
  43. stack = (unsigned char *)task_pt_regs(task);
  44. stack += offset;
  45. return (*((int *)stack));
  46. }
  47. /*
  48. * This routine will put a word on the process kernel stack.
  49. */
  50. static inline int put_stack_long(struct task_struct *task, int offset,
  51. unsigned long data)
  52. {
  53. unsigned char *stack;
  54. stack = (unsigned char *)task_pt_regs(task);
  55. stack += offset;
  56. *(unsigned long *) stack = data;
  57. return 0;
  58. }
/*
 * Arm single-step for @child. SH implements single-step with the
 * User Break Controller (UBC): we record the child's current PC and
 * the context-switch code programs the UBC from it.
 */
void user_enable_single_step(struct task_struct *child)
{
	/* Next scheduling will set up UBC */
	if (child->thread.ubc_pc == 0)
		/* first user of the UBC for this task: bump the global count */
		ubc_usercnt += 1;

	child->thread.ubc_pc = get_stack_long(child,
				offsetof(struct pt_regs, pc));

	set_tsk_thread_flag(child, TIF_SINGLESTEP);
}
/*
 * Disarm single-step for @child: clear the thread flag and make sure
 * the UBC will not be programmed on the next context switch.
 */
void user_disable_single_step(struct task_struct *child)
{
	clear_tsk_thread_flag(child, TIF_SINGLESTEP);

	/*
	 * Ensure the UBC is not programmed at the next context switch.
	 *
	 * Normally this is not needed but there are sequences such as
	 * singlestep, signal delivery, and continue that leave the
	 * ubc_pc non-zero leading to spurious SIGTRAPs.
	 */
	if (child->thread.ubc_pc != 0) {
		/* drop the global UBC user count taken in enable_single_step */
		ubc_usercnt -= 1;
		child->thread.ubc_pc = 0;
	}
}
/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
	/* single-step is the only per-task tracing state we keep */
	user_disable_single_step(child);
}
  92. static int genregs_get(struct task_struct *target,
  93. const struct user_regset *regset,
  94. unsigned int pos, unsigned int count,
  95. void *kbuf, void __user *ubuf)
  96. {
  97. const struct pt_regs *regs = task_pt_regs(target);
  98. int ret;
  99. ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
  100. regs->regs,
  101. 0, 16 * sizeof(unsigned long));
  102. if (!ret)
  103. /* PC, PR, SR, GBR, MACH, MACL, TRA */
  104. ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
  105. &regs->pc,
  106. offsetof(struct pt_regs, pc),
  107. sizeof(struct pt_regs));
  108. if (!ret)
  109. ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
  110. sizeof(struct pt_regs), -1);
  111. return ret;
  112. }
  113. static int genregs_set(struct task_struct *target,
  114. const struct user_regset *regset,
  115. unsigned int pos, unsigned int count,
  116. const void *kbuf, const void __user *ubuf)
  117. {
  118. struct pt_regs *regs = task_pt_regs(target);
  119. int ret;
  120. ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
  121. regs->regs,
  122. 0, 16 * sizeof(unsigned long));
  123. if (!ret && count > 0)
  124. ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
  125. &regs->pc,
  126. offsetof(struct pt_regs, pc),
  127. sizeof(struct pt_regs));
  128. if (!ret)
  129. ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
  130. sizeof(struct pt_regs), -1);
  131. return ret;
  132. }
  133. #ifdef CONFIG_SH_FPU
  134. int fpregs_get(struct task_struct *target,
  135. const struct user_regset *regset,
  136. unsigned int pos, unsigned int count,
  137. void *kbuf, void __user *ubuf)
  138. {
  139. int ret;
  140. ret = init_fpu(target);
  141. if (ret)
  142. return ret;
  143. if ((boot_cpu_data.flags & CPU_HAS_FPU))
  144. return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
  145. &target->thread.fpu.hard, 0, -1);
  146. return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
  147. &target->thread.fpu.soft, 0, -1);
  148. }
  149. static int fpregs_set(struct task_struct *target,
  150. const struct user_regset *regset,
  151. unsigned int pos, unsigned int count,
  152. const void *kbuf, const void __user *ubuf)
  153. {
  154. int ret;
  155. ret = init_fpu(target);
  156. if (ret)
  157. return ret;
  158. set_stopped_child_used_math(target);
  159. if ((boot_cpu_data.flags & CPU_HAS_FPU))
  160. return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
  161. &target->thread.fpu.hard, 0, -1);
  162. return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
  163. &target->thread.fpu.soft, 0, -1);
  164. }
  165. static int fpregs_active(struct task_struct *target,
  166. const struct user_regset *regset)
  167. {
  168. return tsk_used_math(target) ? regset->n : 0;
  169. }
  170. #endif
  171. #ifdef CONFIG_SH_DSP
  172. static int dspregs_get(struct task_struct *target,
  173. const struct user_regset *regset,
  174. unsigned int pos, unsigned int count,
  175. void *kbuf, void __user *ubuf)
  176. {
  177. const struct pt_dspregs *regs =
  178. (struct pt_dspregs *)&target->thread.dsp_status.dsp_regs;
  179. int ret;
  180. ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, regs,
  181. 0, sizeof(struct pt_dspregs));
  182. if (!ret)
  183. ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
  184. sizeof(struct pt_dspregs), -1);
  185. return ret;
  186. }
  187. static int dspregs_set(struct task_struct *target,
  188. const struct user_regset *regset,
  189. unsigned int pos, unsigned int count,
  190. const void *kbuf, const void __user *ubuf)
  191. {
  192. struct pt_dspregs *regs =
  193. (struct pt_dspregs *)&target->thread.dsp_status.dsp_regs;
  194. int ret;
  195. ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs,
  196. 0, sizeof(struct pt_dspregs));
  197. if (!ret)
  198. ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
  199. sizeof(struct pt_dspregs), -1);
  200. return ret;
  201. }
  202. static int dspregs_active(struct task_struct *target,
  203. const struct user_regset *regset)
  204. {
  205. struct pt_regs *regs = task_pt_regs(target);
  206. return regs->sr & SR_DSP ? regset->n : 0;
  207. }
  208. #endif
/*
 * These are our native regset flavours.
 */
enum sh_regset {
	REGSET_GENERAL,		/* general purpose registers (pt_regs) */
#ifdef CONFIG_SH_FPU
	REGSET_FPU,		/* hardware or emulated FPU state */
#endif
#ifdef CONFIG_SH_DSP
	REGSET_DSP,		/* DSP register state */
#endif
};
/* Native regset table, indexed by enum sh_regset. */
static const struct user_regset sh_regsets[] = {
	/*
	 * Format is:
	 *	R0 --> R15
	 *	PC, PR, SR, GBR, MACH, MACL, TRA
	 */
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = ELF_NGREG,
		.size = sizeof(long),
		.align = sizeof(long),
		.get = genregs_get,
		.set = genregs_set,
	},

#ifdef CONFIG_SH_FPU
	[REGSET_FPU] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_fpu_struct) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.get = fpregs_get,
		.set = fpregs_set,
		.active = fpregs_active,	/* only once math was used */
	},
#endif

#ifdef CONFIG_SH_DSP
	/* no core_note_type: DSP state is not emitted into core dumps */
	[REGSET_DSP] = {
		.n = sizeof(struct pt_dspregs) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.get = dspregs_get,
		.set = dspregs_set,
		.active = dspregs_active,	/* only while SR.DSP is set */
	},
#endif
};
/* The single native regset view exposed for SH. */
static const struct user_regset_view user_sh_native_view = {
	.name		= "sh",
	.e_machine	= EM_SH,
	.regsets	= sh_regsets,
	.n		= ARRAY_SIZE(sh_regsets),
};
/*
 * Return the regset view for @task; SH has only the one native view,
 * regardless of the task.
 */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	return &user_sh_native_view;
}
/*
 * Architecture-specific ptrace dispatcher. Handles USER-area peeks and
 * pokes plus whole-regset transfers; anything else is passed through to
 * the generic ptrace_request().
 */
long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
	/* NULL base pointer, used purely for offsetof-style address math */
	struct user * dummy = NULL;
	unsigned long __user *datap = (unsigned long __user *)data;
	int ret;

	switch (request) {
	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long tmp;

		ret = -EIO;
		/* addr must be word-aligned and inside struct user */
		if ((addr & 3) || addr < 0 ||
		    addr > sizeof(struct user) - 3)
			break;

		if (addr < sizeof(struct pt_regs))
			/* a saved general register */
			tmp = get_stack_long(child, addr);
		else if (addr >= (long) &dummy->fpu &&
			 addr < (long) &dummy->u_fpvalid) {
			if (!tsk_used_math(child)) {
				/* FPU never used: synthesize default state */
				if (addr == (long)&dummy->fpu.fpscr)
					tmp = FPSCR_INIT;
				else
					tmp = 0;
			} else
				tmp = ((long *)&child->thread.fpu)
					[(addr - (long)&dummy->fpu) >> 2];
		} else if (addr == (long) &dummy->u_fpvalid)
			tmp = !!tsk_used_math(child);
		else if (addr == PT_TEXT_ADDR)
			tmp = child->mm->start_code;
		else if (addr == PT_DATA_ADDR)
			tmp = child->mm->start_data;
		else if (addr == PT_TEXT_END_ADDR)
			tmp = child->mm->end_code;
		else if (addr == PT_TEXT_LEN)
			tmp = child->mm->end_code - child->mm->start_code;
		else
			tmp = 0;
		ret = put_user(tmp, datap);
		break;
	}

	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
		ret = -EIO;
		/* same alignment/range policy as PTRACE_PEEKUSR */
		if ((addr & 3) || addr < 0 ||
		    addr > sizeof(struct user) - 3)
			break;

		if (addr < sizeof(struct pt_regs))
			ret = put_stack_long(child, addr, data);
		else if (addr >= (long) &dummy->fpu &&
			 addr < (long) &dummy->u_fpvalid) {
			set_stopped_child_used_math(child);
			((long *)&child->thread.fpu)
				[(addr - (long)&dummy->fpu) >> 2] = data;
			ret = 0;
		} else if (addr == (long) &dummy->u_fpvalid) {
			conditional_stopped_child_used_math(data, child);
			ret = 0;
		}
		break;

	case PTRACE_GETREGS:
		return copy_regset_to_user(child, &user_sh_native_view,
					   REGSET_GENERAL,
					   0, sizeof(struct pt_regs),
					   (void __user *)data);
	case PTRACE_SETREGS:
		return copy_regset_from_user(child, &user_sh_native_view,
					     REGSET_GENERAL,
					     0, sizeof(struct pt_regs),
					     (const void __user *)data);
#ifdef CONFIG_SH_FPU
	case PTRACE_GETFPREGS:
		return copy_regset_to_user(child, &user_sh_native_view,
					   REGSET_FPU,
					   0, sizeof(struct user_fpu_struct),
					   (void __user *)data);
	case PTRACE_SETFPREGS:
		return copy_regset_from_user(child, &user_sh_native_view,
					     REGSET_FPU,
					     0, sizeof(struct user_fpu_struct),
					     (const void __user *)data);
#endif
#ifdef CONFIG_SH_DSP
	case PTRACE_GETDSPREGS:
		return copy_regset_to_user(child, &user_sh_native_view,
					   REGSET_DSP,
					   0, sizeof(struct pt_dspregs),
					   (void __user *)data);
	case PTRACE_SETDSPREGS:
		return copy_regset_from_user(child, &user_sh_native_view,
					     REGSET_DSP,
					     0, sizeof(struct pt_dspregs),
					     (const void __user *)data);
#endif
#ifdef CONFIG_BINFMT_ELF_FDPIC
	case PTRACE_GETFDPIC: {
		/* report the FDPIC loadmap addresses to the tracer */
		unsigned long tmp = 0;

		switch (addr) {
		case PTRACE_GETFDPIC_EXEC:
			tmp = child->mm->context.exec_fdpic_loadmap;
			break;
		case PTRACE_GETFDPIC_INTERP:
			tmp = child->mm->context.interp_fdpic_loadmap;
			break;
		default:
			/* unknown selector reports 0 rather than failing */
			break;
		}

		ret = 0;
		if (put_user(tmp, datap)) {
			ret = -EFAULT;
			break;
		}
		break;
	}
#endif
	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
  386. static inline int audit_arch(void)
  387. {
  388. int arch = EM_SH;
  389. #ifdef CONFIG_CPU_LITTLE_ENDIAN
  390. arch |= __AUDIT_ARCH_LE;
  391. #endif
  392. return arch;
  393. }
/*
 * Syscall-entry tracing hook, called from the syscall entry path.
 * Returns the syscall number to execute, or -1 to force ENOSYS when a
 * tracer vetoed the call. Order matters: seccomp first, then ptrace,
 * then ftrace, then audit.
 */
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
	long ret = 0;

	/* seccomp check; may kill the task on a disallowed syscall */
	secure_computing(regs->regs[0]);

	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    tracehook_report_syscall_entry(regs))
		/*
		 * Tracing decided this syscall should not happen.
		 * We'll return a bogus call number to get an ENOSYS
		 * error, but leave the original number in regs->regs[0].
		 */
		ret = -1L;

	if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE)))
		ftrace_syscall_enter(regs);

	if (unlikely(current->audit_context))
		/* syscall number in r3, arguments in r4-r7 */
		audit_syscall_entry(audit_arch(), regs->regs[3],
				    regs->regs[4], regs->regs[5],
				    regs->regs[6], regs->regs[7]);

	return ret ?: regs->regs[0];
}
/*
 * Syscall-exit tracing hook, called from the syscall return path.
 * Reports to audit, ftrace, and ptrace/tracehook in that order.
 */
asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
{
	int step;

	/* audit first, while the return value in regs->regs[0] is fresh */
	if (unlikely(current->audit_context))
		audit_syscall_exit(AUDITSC_RESULT(regs->regs[0]),
				   regs->regs[0]);

	if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE)))
		ftrace_syscall_exit(regs);

	/* notify the tracer on single-step as well as syscall tracing */
	step = test_thread_flag(TIF_SINGLESTEP);
	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, step);
}