ptrace_32.c

/*
 * SuperH process tracing
 *
 * Copyright (C) 1999, 2000  Kaz Kojima & Niibe Yutaka
 * Copyright (C) 2002 - 2009  Paul Mundt
 *
 * Audit support by Yuichi Nakamura <ynakam@hitachisoft.jp>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/io.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/tracehook.h>
#include <linux/elf.h>
#include <linux/regset.h>
#include <linux/hw_breakpoint.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/syscalls.h>
#include <asm/fpu.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
/*
 * This routine will get a word off of the process kernel stack.
 */
static inline int get_stack_long(struct task_struct *task, int offset)
{
	unsigned char *stack;

	stack = (unsigned char *)task_pt_regs(task);
	stack += offset;
	return (*((int *)stack));
}

/*
 * This routine will put a word on the process kernel stack.
 */
static inline int put_stack_long(struct task_struct *task, int offset,
				 unsigned long data)
{
	unsigned char *stack;

	stack = (unsigned char *)task_pt_regs(task);
	stack += offset;
	*(unsigned long *) stack = data;

	return 0;
}
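
/*
 * perf callback invoked when the ptrace hardware breakpoint fires.
 */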
void ptrace_triggered(struct perf_event *bp, int nmi,
		      struct perf_sample_data *data, struct pt_regs *regs)
{
	struct perf_event_attr attr;

	/*
	 * Disable the breakpoint request here since ptrace has defined a
	 * one-shot behaviour for breakpoint exceptions.
	 */
	attr = bp->attr;
	attr.disabled = true;
	modify_user_hw_breakpoint(bp, &attr);
}
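
/*
 * Install (or retarget) the per-task hardware breakpoint used to
 * emulate single-stepping: the first call registers a breakpoint at
 * @addr, later calls just move the existing one.
 */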
static int set_single_step(struct task_struct *tsk, unsigned long addr)
{
	struct thread_struct *thread = &tsk->thread;
	struct perf_event *bp;
	struct perf_event_attr attr;

	bp = thread->ptrace_bps[0];
	if (!bp) {
		hw_breakpoint_init(&attr);

		attr.bp_addr = addr;
		attr.bp_len = HW_BREAKPOINT_LEN_2;
		attr.bp_type = HW_BREAKPOINT_R;

		bp = register_user_hw_breakpoint(&attr, ptrace_triggered, tsk);
		if (IS_ERR(bp))
			return PTR_ERR(bp);

		thread->ptrace_bps[0] = bp;
	} else {
		int err;

		attr = bp->attr;
		attr.bp_addr = addr;
		err = modify_user_hw_breakpoint(bp, &attr);
		if (unlikely(err))
			return err;
	}

	return 0;
}
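
/*
 * Generic ptrace single-step hooks: flag the task and arm the
 * breakpoint at its current pc.
 */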
void user_enable_single_step(struct task_struct *child)
{
	unsigned long pc = get_stack_long(child, offsetof(struct pt_regs, pc));

	set_tsk_thread_flag(child, TIF_SINGLESTEP);

	set_single_step(child, pc);
}

void user_disable_single_step(struct task_struct *child)
{
	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
}

/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
	user_disable_single_step(child);
}
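
/*
 * Regset get/set handlers for the general registers: r0..r15 first,
 * followed by pc, pr, sr, gbr, mach, macl and tra, laid out as in
 * struct pt_regs.
 */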
static int genregs_get(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       void *kbuf, void __user *ubuf)
{
	const struct pt_regs *regs = task_pt_regs(target);
	int ret;

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  regs->regs,
				  0, 16 * sizeof(unsigned long));
	if (!ret)
		/* PC, PR, SR, GBR, MACH, MACL, TRA */
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &regs->pc,
					  offsetof(struct pt_regs, pc),
					  sizeof(struct pt_regs));
	if (!ret)
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       sizeof(struct pt_regs), -1);

	return ret;
}

static int genregs_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	int ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 regs->regs,
				 0, 16 * sizeof(unsigned long));
	if (!ret && count > 0)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &regs->pc,
					 offsetof(struct pt_regs, pc),
					 sizeof(struct pt_regs));
	if (!ret)
		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						sizeof(struct pt_regs), -1);

	return ret;
}

#ifdef CONFIG_SH_FPU
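/*
 * FPU regset handlers. Depending on whether the CPU has a hardware
 * FPU, the state lives in either the hard or soft FPU area of the
 * task's extended state.
 */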
int fpregs_get(struct task_struct *target,
	       const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       void *kbuf, void __user *ubuf)
{
	int ret;

	ret = init_fpu(target);
	if (ret)
		return ret;

	if ((boot_cpu_data.flags & CPU_HAS_FPU))
		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					   &target->thread.xstate->hardfpu, 0, -1);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &target->thread.xstate->softfpu, 0, -1);
}

static int fpregs_set(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      const void *kbuf, const void __user *ubuf)
{
	int ret;

	ret = init_fpu(target);
	if (ret)
		return ret;

	set_stopped_child_used_math(target);

	if ((boot_cpu_data.flags & CPU_HAS_FPU))
		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					  &target->thread.xstate->hardfpu, 0, -1);

	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.xstate->softfpu, 0, -1);
}

static int fpregs_active(struct task_struct *target,
			 const struct user_regset *regset)
{
	return tsk_used_math(target) ? regset->n : 0;
}
#endif

#ifdef CONFIG_SH_DSP
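/*
 * DSP regset handlers. The DSP register dump is only reported as
 * active while the target has the DSP enabled (SR.DSP set).
 */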
static int dspregs_get(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       void *kbuf, void __user *ubuf)
{
	const struct pt_dspregs *regs =
		(struct pt_dspregs *)&target->thread.dsp_status.dsp_regs;
	int ret;

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, regs,
				  0, sizeof(struct pt_dspregs));
	if (!ret)
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       sizeof(struct pt_dspregs), -1);

	return ret;
}

static int dspregs_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	struct pt_dspregs *regs =
		(struct pt_dspregs *)&target->thread.dsp_status.dsp_regs;
	int ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs,
				 0, sizeof(struct pt_dspregs));
	if (!ret)
		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						sizeof(struct pt_dspregs), -1);

	return ret;
}

static int dspregs_active(struct task_struct *target,
			  const struct user_regset *regset)
{
	struct pt_regs *regs = task_pt_regs(target);

	return regs->sr & SR_DSP ? regset->n : 0;
}
#endif

/*
 * These are our native regset flavours.
 */
enum sh_regset {
	REGSET_GENERAL,
#ifdef CONFIG_SH_FPU
	REGSET_FPU,
#endif
#ifdef CONFIG_SH_DSP
	REGSET_DSP,
#endif
};

static const struct user_regset sh_regsets[] = {
	/*
	 * Format is:
	 *	R0 --> R15
	 *	PC, PR, SR, GBR, MACH, MACL, TRA
	 */
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = ELF_NGREG,
		.size = sizeof(long),
		.align = sizeof(long),
		.get = genregs_get,
		.set = genregs_set,
	},

#ifdef CONFIG_SH_FPU
	[REGSET_FPU] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_fpu_struct) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.get = fpregs_get,
		.set = fpregs_set,
		.active = fpregs_active,
	},
#endif

#ifdef CONFIG_SH_DSP
	[REGSET_DSP] = {
		.n = sizeof(struct pt_dspregs) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.get = dspregs_get,
		.set = dspregs_set,
		.active = dspregs_active,
	},
#endif
};

static const struct user_regset_view user_sh_native_view = {
	.name = "sh",
	.e_machine = EM_SH,
	.regsets = sh_regsets,
	.n = ARRAY_SIZE(sh_regsets),
};

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	return &user_sh_native_view;
}
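
/*
 * Architecture-specific ptrace requests. PEEKUSR/POKEUSR poke around
 * in the USER area (pt_regs and FPU state), while the register-set
 * requests are routed through the regset views defined above.
 */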
long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
	struct user *dummy = NULL;
	unsigned long __user *datap = (unsigned long __user *)data;
	int ret;

	switch (request) {
	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long tmp;

		ret = -EIO;
		if ((addr & 3) || addr < 0 ||
		    addr > sizeof(struct user) - 3)
			break;

		if (addr < sizeof(struct pt_regs))
			tmp = get_stack_long(child, addr);
		else if (addr >= (long) &dummy->fpu &&
			 addr < (long) &dummy->u_fpvalid) {
			if (!tsk_used_math(child)) {
				if (addr == (long)&dummy->fpu.fpscr)
					tmp = FPSCR_INIT;
				else
					tmp = 0;
			} else
				tmp = ((long *)child->thread.xstate)
					[(addr - (long)&dummy->fpu) >> 2];
		} else if (addr == (long) &dummy->u_fpvalid)
			tmp = !!tsk_used_math(child);
		else if (addr == PT_TEXT_ADDR)
			tmp = child->mm->start_code;
		else if (addr == PT_DATA_ADDR)
			tmp = child->mm->start_data;
		else if (addr == PT_TEXT_END_ADDR)
			tmp = child->mm->end_code;
		else if (addr == PT_TEXT_LEN)
			tmp = child->mm->end_code - child->mm->start_code;
		else
			tmp = 0;
		ret = put_user(tmp, datap);
		break;
	}

	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
		ret = -EIO;
		if ((addr & 3) || addr < 0 ||
		    addr > sizeof(struct user) - 3)
			break;

		if (addr < sizeof(struct pt_regs))
			ret = put_stack_long(child, addr, data);
		else if (addr >= (long) &dummy->fpu &&
			 addr < (long) &dummy->u_fpvalid) {
			set_stopped_child_used_math(child);
			((long *)child->thread.xstate)
				[(addr - (long)&dummy->fpu) >> 2] = data;
			ret = 0;
		} else if (addr == (long) &dummy->u_fpvalid) {
			conditional_stopped_child_used_math(data, child);
			ret = 0;
		}
		break;

	case PTRACE_GETREGS:
		return copy_regset_to_user(child, &user_sh_native_view,
					   REGSET_GENERAL,
					   0, sizeof(struct pt_regs),
					   (void __user *)data);
	case PTRACE_SETREGS:
		return copy_regset_from_user(child, &user_sh_native_view,
					     REGSET_GENERAL,
					     0, sizeof(struct pt_regs),
					     (const void __user *)data);
#ifdef CONFIG_SH_FPU
	case PTRACE_GETFPREGS:
		return copy_regset_to_user(child, &user_sh_native_view,
					   REGSET_FPU,
					   0, sizeof(struct user_fpu_struct),
					   (void __user *)data);
	case PTRACE_SETFPREGS:
		return copy_regset_from_user(child, &user_sh_native_view,
					     REGSET_FPU,
					     0, sizeof(struct user_fpu_struct),
					     (const void __user *)data);
#endif
#ifdef CONFIG_SH_DSP
	case PTRACE_GETDSPREGS:
		return copy_regset_to_user(child, &user_sh_native_view,
					   REGSET_DSP,
					   0, sizeof(struct pt_dspregs),
					   (void __user *)data);
	case PTRACE_SETDSPREGS:
		return copy_regset_from_user(child, &user_sh_native_view,
					     REGSET_DSP,
					     0, sizeof(struct pt_dspregs),
					     (const void __user *)data);
#endif
#ifdef CONFIG_BINFMT_ELF_FDPIC
	case PTRACE_GETFDPIC: {
		unsigned long tmp = 0;

		switch (addr) {
		case PTRACE_GETFDPIC_EXEC:
			tmp = child->mm->context.exec_fdpic_loadmap;
			break;
		case PTRACE_GETFDPIC_INTERP:
			tmp = child->mm->context.interp_fdpic_loadmap;
			break;
		default:
			break;
		}

		ret = 0;
		if (put_user(tmp, datap)) {
			ret = -EFAULT;
			break;
		}
		break;
	}
#endif
	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
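
/* Audit architecture token: EM_SH, with the little-endian flag if set. */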
static inline int audit_arch(void)
{
	int arch = EM_SH;

#ifdef CONFIG_CPU_LITTLE_ENDIAN
	arch |= __AUDIT_ARCH_LE;
#endif

	return arch;
}
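
/*
 * Syscall entry hook: seccomp check, tracehook notification, syscall
 * tracepoint and audit entry. Returns the syscall number to run, or
 * -1 if the tracer cancelled the call.
 */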
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
	long ret = 0;

	secure_computing(regs->regs[0]);

	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    tracehook_report_syscall_entry(regs))
		/*
		 * Tracing decided this syscall should not happen.
		 * We'll return a bogus call number to get an ENOSYS
		 * error, but leave the original number in regs->regs[0].
		 */
		ret = -1L;

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->regs[0]);

	if (unlikely(current->audit_context))
		audit_syscall_entry(audit_arch(), regs->regs[3],
				    regs->regs[4], regs->regs[5],
				    regs->regs[6], regs->regs[7]);

	return ret ?: regs->regs[0];
}
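
/*
 * Syscall exit hook: audit exit, syscall tracepoint and the exit
 * report to the tracer, including single-step notification.
 */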
asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
{
	int step;

	if (unlikely(current->audit_context))
		audit_syscall_exit(AUDITSC_RESULT(regs->regs[0]),
				   regs->regs[0]);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->regs[0]);

	step = test_thread_flag(TIF_SINGLESTEP);
	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, step);
}