ptrace_32.c

/*
 * SuperH process tracing
 *
 * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
 * Copyright (C) 2002 - 2009 Paul Mundt
 *
 * Audit support by Yuichi Nakamura <ynakam@hitachisoft.jp>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/io.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/tracehook.h>
#include <linux/elf.h>
#include <linux/regset.h>
#include <linux/hw_breakpoint.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/syscalls.h>
#include <asm/fpu.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
/*
 * This routine will get a word off of the process kernel stack.
 */
static inline int get_stack_long(struct task_struct *task, int offset)
{
	unsigned char *stack;

	stack = (unsigned char *)task_pt_regs(task);
	stack += offset;
	return (*((int *)stack));
}
/*
 * This routine will put a word on the process kernel stack.
 */
static inline int put_stack_long(struct task_struct *task, int offset,
				 unsigned long data)
{
	unsigned char *stack;

	stack = (unsigned char *)task_pt_regs(task);
	stack += offset;
	*(unsigned long *) stack = data;

	return 0;
}
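
/*
 * Perf callback invoked when the ptrace hardware breakpoint used for
 * single-stepping fires.
 */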
void ptrace_triggered(struct perf_event *bp,
		      struct perf_sample_data *data, struct pt_regs *regs)
{
	struct perf_event_attr attr;

	/*
	 * Disable the breakpoint request here since ptrace has defined a
	 * one-shot behaviour for breakpoint exceptions.
	 */
	attr = bp->attr;
	attr.disabled = true;
	modify_user_hw_breakpoint(bp, &attr);
}
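
/*
 * Install (or re-arm) the per-task hardware breakpoint at @addr that is
 * used to emulate single-stepping.
 */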
static int set_single_step(struct task_struct *tsk, unsigned long addr)
{
	struct thread_struct *thread = &tsk->thread;
	struct perf_event *bp;
	struct perf_event_attr attr;

	bp = thread->ptrace_bps[0];
	if (!bp) {
		ptrace_breakpoint_init(&attr);

		attr.bp_addr = addr;
		attr.bp_len = HW_BREAKPOINT_LEN_2;
		attr.bp_type = HW_BREAKPOINT_R;

		bp = register_user_hw_breakpoint(&attr, ptrace_triggered,
						 NULL, tsk);
		if (IS_ERR(bp))
			return PTR_ERR(bp);

		thread->ptrace_bps[0] = bp;
	} else {
		int err;

		attr = bp->attr;
		attr.bp_addr = addr;

		/* reenable breakpoint */
		attr.disabled = false;
		err = modify_user_hw_breakpoint(bp, &attr);
		if (unlikely(err))
			return err;
	}

	return 0;
}
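
/*
 * Enable single-stepping for @child: fetch its saved PC from the register
 * frame and arm the ptrace hardware breakpoint there.
 */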
void user_enable_single_step(struct task_struct *child)
{
	unsigned long pc = get_stack_long(child, offsetof(struct pt_regs, pc));

	set_tsk_thread_flag(child, TIF_SINGLESTEP);

	if (ptrace_get_breakpoints(child) < 0)
		return;

	set_single_step(child, pc);
	ptrace_put_breakpoints(child);
}
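
/*
 * Disabling only clears TIF_SINGLESTEP; the breakpoint itself is handled
 * by ptrace_triggered()'s one-shot disable.
 */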
void user_disable_single_step(struct task_struct *child)
{
	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
}

/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
	user_disable_single_step(child);
}
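
/*
 * Regset get handler for the general registers: copies r0-r15 followed by
 * pc, pr, sr, gbr, mach, macl and tra out of the saved pt_regs.
 */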
static int genregs_get(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       void *kbuf, void __user *ubuf)
{
	const struct pt_regs *regs = task_pt_regs(target);
	int ret;

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  regs->regs,
				  0, 16 * sizeof(unsigned long));
	if (!ret)
		/* PC, PR, SR, GBR, MACH, MACL, TRA */
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &regs->pc,
					  offsetof(struct pt_regs, pc),
					  sizeof(struct pt_regs));
	if (!ret)
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       sizeof(struct pt_regs), -1);

	return ret;
}
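
/*
 * Regset set handler for the general registers: the inverse of
 * genregs_get(), writing the supplied values back into pt_regs.
 */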
static int genregs_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	int ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 regs->regs,
				 0, 16 * sizeof(unsigned long));
	if (!ret && count > 0)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &regs->pc,
					 offsetof(struct pt_regs, pc),
					 sizeof(struct pt_regs));
	if (!ret)
		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						sizeof(struct pt_regs), -1);

	return ret;
}

#ifdef CONFIG_SH_FPU
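/*
 * Copy the FPU register state out to the regset, taking it from the
 * hardware FPU image or the software emulation image as appropriate.
 */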
int fpregs_get(struct task_struct *target,
	       const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       void *kbuf, void __user *ubuf)
{
	int ret;

	ret = init_fpu(target);
	if (ret)
		return ret;

	if ((boot_cpu_data.flags & CPU_HAS_FPU))
		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					   &target->thread.xstate->hardfpu, 0, -1);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &target->thread.xstate->softfpu, 0, -1);
}
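
/*
 * Write new FPU register state from the regset, marking the stopped
 * child as having used the FPU.
 */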
static int fpregs_set(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      const void *kbuf, const void __user *ubuf)
{
	int ret;

	ret = init_fpu(target);
	if (ret)
		return ret;

	set_stopped_child_used_math(target);

	if ((boot_cpu_data.flags & CPU_HAS_FPU))
		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					  &target->thread.xstate->hardfpu, 0, -1);

	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.xstate->softfpu, 0, -1);
}
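
/* The FPU regset is only active for tasks that have used the FPU. */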
static int fpregs_active(struct task_struct *target,
			 const struct user_regset *regset)
{
	return tsk_used_math(target) ? regset->n : 0;
}
#endif

#ifdef CONFIG_SH_DSP
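/*
 * DSP regset handlers: copy the saved DSP register block in and out of
 * thread.dsp_status, and expose the regset only when the task's SR has
 * the DSP bit set.
 */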
static int dspregs_get(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       void *kbuf, void __user *ubuf)
{
	const struct pt_dspregs *regs =
		(struct pt_dspregs *)&target->thread.dsp_status.dsp_regs;
	int ret;

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, regs,
				  0, sizeof(struct pt_dspregs));
	if (!ret)
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       sizeof(struct pt_dspregs), -1);

	return ret;
}

static int dspregs_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	struct pt_dspregs *regs =
		(struct pt_dspregs *)&target->thread.dsp_status.dsp_regs;
	int ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs,
				 0, sizeof(struct pt_dspregs));
	if (!ret)
		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						sizeof(struct pt_dspregs), -1);

	return ret;
}

static int dspregs_active(struct task_struct *target,
			  const struct user_regset *regset)
{
	struct pt_regs *regs = task_pt_regs(target);

	return regs->sr & SR_DSP ? regset->n : 0;
}
#endif
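
/* Table mapping register names to their offsets within struct pt_regs. */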
const struct pt_regs_offset regoffset_table[] = {
	REGS_OFFSET_NAME(0),
	REGS_OFFSET_NAME(1),
	REGS_OFFSET_NAME(2),
	REGS_OFFSET_NAME(3),
	REGS_OFFSET_NAME(4),
	REGS_OFFSET_NAME(5),
	REGS_OFFSET_NAME(6),
	REGS_OFFSET_NAME(7),
	REGS_OFFSET_NAME(8),
	REGS_OFFSET_NAME(9),
	REGS_OFFSET_NAME(10),
	REGS_OFFSET_NAME(11),
	REGS_OFFSET_NAME(12),
	REGS_OFFSET_NAME(13),
	REGS_OFFSET_NAME(14),
	REGS_OFFSET_NAME(15),
	REG_OFFSET_NAME(pc),
	REG_OFFSET_NAME(pr),
	REG_OFFSET_NAME(sr),
	REG_OFFSET_NAME(gbr),
	REG_OFFSET_NAME(mach),
	REG_OFFSET_NAME(macl),
	REG_OFFSET_NAME(tra),
	REG_OFFSET_END,
};

/*
 * These are our native regset flavours.
 */
enum sh_regset {
	REGSET_GENERAL,

#ifdef CONFIG_SH_FPU
	REGSET_FPU,
#endif

#ifdef CONFIG_SH_DSP
	REGSET_DSP,
#endif
};
static const struct user_regset sh_regsets[] = {
	/*
	 * Format is:
	 *	R0 --> R15
	 *	PC, PR, SR, GBR, MACH, MACL, TRA
	 */
	[REGSET_GENERAL] = {
		.core_note_type	= NT_PRSTATUS,
		.n		= ELF_NGREG,
		.size		= sizeof(long),
		.align		= sizeof(long),
		.get		= genregs_get,
		.set		= genregs_set,
	},

#ifdef CONFIG_SH_FPU
	[REGSET_FPU] = {
		.core_note_type	= NT_PRFPREG,
		.n		= sizeof(struct user_fpu_struct) / sizeof(long),
		.size		= sizeof(long),
		.align		= sizeof(long),
		.get		= fpregs_get,
		.set		= fpregs_set,
		.active		= fpregs_active,
	},
#endif

#ifdef CONFIG_SH_DSP
	[REGSET_DSP] = {
		.n		= sizeof(struct pt_dspregs) / sizeof(long),
		.size		= sizeof(long),
		.align		= sizeof(long),
		.get		= dspregs_get,
		.set		= dspregs_set,
		.active		= dspregs_active,
	},
#endif
};

static const struct user_regset_view user_sh_native_view = {
	.name		= "sh",
	.e_machine	= EM_SH,
	.regsets	= sh_regsets,
	.n		= ARRAY_SIZE(sh_regsets),
};

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	return &user_sh_native_view;
}
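
/*
 * Architecture-specific ptrace requests: peek/poke of the USER area
 * (including FPU state and the text/data layout queries) and whole-regset
 * transfers; everything else falls through to ptrace_request().
 */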
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	unsigned long __user *datap = (unsigned long __user *)data;
	int ret;

	switch (request) {
	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long tmp;

		ret = -EIO;
		if ((addr & 3) || addr < 0 ||
		    addr > sizeof(struct user) - 3)
			break;

		if (addr < sizeof(struct pt_regs))
			tmp = get_stack_long(child, addr);
		else if (addr >= offsetof(struct user, fpu) &&
			 addr < offsetof(struct user, u_fpvalid)) {
			if (!tsk_used_math(child)) {
				if (addr == offsetof(struct user, fpu.fpscr))
					tmp = FPSCR_INIT;
				else
					tmp = 0;
			} else {
				unsigned long index;
				ret = init_fpu(child);
				if (ret)
					break;
				index = addr - offsetof(struct user, fpu);
				tmp = ((unsigned long *)child->thread.xstate)
					[index >> 2];
			}
		} else if (addr == offsetof(struct user, u_fpvalid))
			tmp = !!tsk_used_math(child);
		else if (addr == PT_TEXT_ADDR)
			tmp = child->mm->start_code;
		else if (addr == PT_DATA_ADDR)
			tmp = child->mm->start_data;
		else if (addr == PT_TEXT_END_ADDR)
			tmp = child->mm->end_code;
		else if (addr == PT_TEXT_LEN)
			tmp = child->mm->end_code - child->mm->start_code;
		else
			tmp = 0;
		ret = put_user(tmp, datap);
		break;
	}

	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
		ret = -EIO;
		if ((addr & 3) || addr < 0 ||
		    addr > sizeof(struct user) - 3)
			break;

		if (addr < sizeof(struct pt_regs))
			ret = put_stack_long(child, addr, data);
		else if (addr >= offsetof(struct user, fpu) &&
			 addr < offsetof(struct user, u_fpvalid)) {
			unsigned long index;
			ret = init_fpu(child);
			if (ret)
				break;
			index = addr - offsetof(struct user, fpu);
			set_stopped_child_used_math(child);
			((unsigned long *)child->thread.xstate)
				[index >> 2] = data;
			ret = 0;
		} else if (addr == offsetof(struct user, u_fpvalid)) {
			conditional_stopped_child_used_math(data, child);
			ret = 0;
		}

		break;

	case PTRACE_GETREGS:
		return copy_regset_to_user(child, &user_sh_native_view,
					   REGSET_GENERAL,
					   0, sizeof(struct pt_regs),
					   datap);
	case PTRACE_SETREGS:
		return copy_regset_from_user(child, &user_sh_native_view,
					     REGSET_GENERAL,
					     0, sizeof(struct pt_regs),
					     datap);
#ifdef CONFIG_SH_FPU
	case PTRACE_GETFPREGS:
		return copy_regset_to_user(child, &user_sh_native_view,
					   REGSET_FPU,
					   0, sizeof(struct user_fpu_struct),
					   datap);
	case PTRACE_SETFPREGS:
		return copy_regset_from_user(child, &user_sh_native_view,
					     REGSET_FPU,
					     0, sizeof(struct user_fpu_struct),
					     datap);
#endif
#ifdef CONFIG_SH_DSP
	case PTRACE_GETDSPREGS:
		return copy_regset_to_user(child, &user_sh_native_view,
					   REGSET_DSP,
					   0, sizeof(struct pt_dspregs),
					   datap);
	case PTRACE_SETDSPREGS:
		return copy_regset_from_user(child, &user_sh_native_view,
					     REGSET_DSP,
					     0, sizeof(struct pt_dspregs),
					     datap);
#endif
	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
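
/* Audit architecture identifier: EM_SH, plus the little-endian flag when set. */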
static inline int audit_arch(void)
{
	int arch = EM_SH;

#ifdef CONFIG_CPU_LITTLE_ENDIAN
	arch |= __AUDIT_ARCH_LE;
#endif

	return arch;
}
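
/*
 * Syscall entry hook: runs seccomp, the tracehook entry report, the
 * syscall-enter tracepoint and audit, and returns the (possibly vetoed)
 * syscall number to the entry path.
 */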
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
	long ret = 0;

	secure_computing(regs->regs[0]);

	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    tracehook_report_syscall_entry(regs))
		/*
		 * Tracing decided this syscall should not happen.
		 * We'll return a bogus call number to get an ENOSYS
		 * error, but leave the original number in regs->regs[0].
		 */
		ret = -1L;

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->regs[0]);

	if (unlikely(current->audit_context))
		audit_syscall_entry(audit_arch(), regs->regs[3],
				    regs->regs[4], regs->regs[5],
				    regs->regs[6], regs->regs[7]);

	return ret ?: regs->regs[0];
}
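
/*
 * Syscall exit hook: reports the result to audit and the syscall-exit
 * tracepoint, then notifies the tracer, flagging whether a single step
 * just completed.
 */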
asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
{
	int step;

	if (unlikely(current->audit_context))
		audit_syscall_exit(AUDITSC_RESULT(regs->regs[0]),
				   regs->regs[0]);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->regs[0]);

	step = test_thread_flag(TIF_SINGLESTEP);
	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, step);
}