ptrace_32.c

/*
 * SuperH process tracing
 *
 * Copyright (C) 1999, 2000  Kaz Kojima & Niibe Yutaka
 * Copyright (C) 2002 - 2009  Paul Mundt
 *
 * Audit support by Yuichi Nakamura <ynakam@hitachisoft.jp>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/io.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/tracehook.h>
#include <linux/elf.h>
#include <linux/regset.h>
#include <linux/hw_breakpoint.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/syscalls.h>
#include <asm/fpu.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

/*
 * This routine will get a word off of the process kernel stack.
 */
static inline int get_stack_long(struct task_struct *task, int offset)
{
	unsigned char *stack;

	stack = (unsigned char *)task_pt_regs(task);
	stack += offset;
	return (*((int *)stack));
}

/*
 * This routine will put a word on the process kernel stack.
 */
static inline int put_stack_long(struct task_struct *task, int offset,
				 unsigned long data)
{
	unsigned char *stack;

	stack = (unsigned char *)task_pt_regs(task);
	stack += offset;
	*(unsigned long *) stack = data;

	return 0;
}

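/*
 * Callback run when the ptrace hardware breakpoint fires.
 */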
void ptrace_triggered(struct perf_event *bp, int nmi,
		      struct perf_sample_data *data, struct pt_regs *regs)
{
	struct perf_event_attr attr;

	/*
	 * Disable the breakpoint request here since ptrace has defined a
	 * one-shot behaviour for breakpoint exceptions.
	 */
	attr = bp->attr;
	attr.disabled = true;
	modify_user_hw_breakpoint(bp, &attr);
}

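/*
 * Install (or update) the per-thread hardware breakpoint used to
 * implement single-stepping at the given address.
 */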
static int set_single_step(struct task_struct *tsk, unsigned long addr)
{
	struct thread_struct *thread = &tsk->thread;
	struct perf_event *bp;
	struct perf_event_attr attr;

	bp = thread->ptrace_bps[0];
	if (!bp) {
		ptrace_breakpoint_init(&attr);

		attr.bp_addr = addr;
		attr.bp_len = HW_BREAKPOINT_LEN_2;
		attr.bp_type = HW_BREAKPOINT_R;

		bp = register_user_hw_breakpoint(&attr, ptrace_triggered, tsk);
		if (IS_ERR(bp))
			return PTR_ERR(bp);

		thread->ptrace_bps[0] = bp;
	} else {
		int err;

		attr = bp->attr;
		attr.bp_addr = addr;
		err = modify_user_hw_breakpoint(bp, &attr);
		if (unlikely(err))
			return err;
	}

	return 0;
}

void user_enable_single_step(struct task_struct *child)
{
	unsigned long pc = get_stack_long(child, offsetof(struct pt_regs, pc));

	set_tsk_thread_flag(child, TIF_SINGLESTEP);

	set_single_step(child, pc);
}

void user_disable_single_step(struct task_struct *child)
{
	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
}

/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
	user_disable_single_step(child);
}

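/*
 * Regset get/set handlers for the general registers (r0-r15 plus
 * pc, pr, sr, gbr, mach, macl and tra), backed by the task's pt_regs.
 */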
static int genregs_get(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       void *kbuf, void __user *ubuf)
{
	const struct pt_regs *regs = task_pt_regs(target);
	int ret;

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  regs->regs,
				  0, 16 * sizeof(unsigned long));
	if (!ret)
		/* PC, PR, SR, GBR, MACH, MACL, TRA */
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &regs->pc,
					  offsetof(struct pt_regs, pc),
					  sizeof(struct pt_regs));
	if (!ret)
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       sizeof(struct pt_regs), -1);

	return ret;
}

static int genregs_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	int ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 regs->regs,
				 0, 16 * sizeof(unsigned long));
	if (!ret && count > 0)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &regs->pc,
					 offsetof(struct pt_regs, pc),
					 sizeof(struct pt_regs));
	if (!ret)
		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						sizeof(struct pt_regs), -1);

	return ret;
}

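/*
 * FPU regset handlers: copy the FPU state kept in thread.xstate
 * (hardfpu or softfpu, depending on whether the CPU has an FPU)
 * to/from the regset buffer.
 */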
#ifdef CONFIG_SH_FPU
int fpregs_get(struct task_struct *target,
	       const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       void *kbuf, void __user *ubuf)
{
	int ret;

	ret = init_fpu(target);
	if (ret)
		return ret;

	if ((boot_cpu_data.flags & CPU_HAS_FPU))
		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					   &target->thread.xstate->hardfpu, 0, -1);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &target->thread.xstate->softfpu, 0, -1);
}

static int fpregs_set(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      const void *kbuf, const void __user *ubuf)
{
	int ret;

	ret = init_fpu(target);
	if (ret)
		return ret;

	set_stopped_child_used_math(target);

	if ((boot_cpu_data.flags & CPU_HAS_FPU))
		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					  &target->thread.xstate->hardfpu, 0, -1);

	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.xstate->softfpu, 0, -1);
}

static int fpregs_active(struct task_struct *target,
			 const struct user_regset *regset)
{
	return tsk_used_math(target) ? regset->n : 0;
}
#endif

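/*
 * DSP regset handlers: copy the DSP register state kept in
 * thread.dsp_status.dsp_regs; the regset is only active when the
 * SR.DSP bit is set for the task.
 */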
#ifdef CONFIG_SH_DSP
static int dspregs_get(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       void *kbuf, void __user *ubuf)
{
	const struct pt_dspregs *regs =
		(struct pt_dspregs *)&target->thread.dsp_status.dsp_regs;
	int ret;

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, regs,
				  0, sizeof(struct pt_dspregs));
	if (!ret)
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       sizeof(struct pt_dspregs), -1);

	return ret;
}

static int dspregs_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	struct pt_dspregs *regs =
		(struct pt_dspregs *)&target->thread.dsp_status.dsp_regs;
	int ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs,
				 0, sizeof(struct pt_dspregs));
	if (!ret)
		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						sizeof(struct pt_dspregs), -1);

	return ret;
}

static int dspregs_active(struct task_struct *target,
			  const struct user_regset *regset)
{
	struct pt_regs *regs = task_pt_regs(target);

	return regs->sr & SR_DSP ? regset->n : 0;
}
#endif

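/*
 * Name/offset table for the registers held in struct pt_regs.
 */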
const struct pt_regs_offset regoffset_table[] = {
	REGS_OFFSET_NAME(0),
	REGS_OFFSET_NAME(1),
	REGS_OFFSET_NAME(2),
	REGS_OFFSET_NAME(3),
	REGS_OFFSET_NAME(4),
	REGS_OFFSET_NAME(5),
	REGS_OFFSET_NAME(6),
	REGS_OFFSET_NAME(7),
	REGS_OFFSET_NAME(8),
	REGS_OFFSET_NAME(9),
	REGS_OFFSET_NAME(10),
	REGS_OFFSET_NAME(11),
	REGS_OFFSET_NAME(12),
	REGS_OFFSET_NAME(13),
	REGS_OFFSET_NAME(14),
	REGS_OFFSET_NAME(15),
	REG_OFFSET_NAME(pc),
	REG_OFFSET_NAME(pr),
	REG_OFFSET_NAME(sr),
	REG_OFFSET_NAME(gbr),
	REG_OFFSET_NAME(mach),
	REG_OFFSET_NAME(macl),
	REG_OFFSET_NAME(tra),
	REG_OFFSET_END,
};

/*
 * These are our native regset flavours.
 */
enum sh_regset {
	REGSET_GENERAL,

#ifdef CONFIG_SH_FPU
	REGSET_FPU,
#endif

#ifdef CONFIG_SH_DSP
	REGSET_DSP,
#endif
};

static const struct user_regset sh_regsets[] = {
	/*
	 * Format is:
	 *	R0 --> R15
	 *	PC, PR, SR, GBR, MACH, MACL, TRA
	 */
	[REGSET_GENERAL] = {
		.core_note_type	= NT_PRSTATUS,
		.n		= ELF_NGREG,
		.size		= sizeof(long),
		.align		= sizeof(long),
		.get		= genregs_get,
		.set		= genregs_set,
	},

#ifdef CONFIG_SH_FPU
	[REGSET_FPU] = {
		.core_note_type	= NT_PRFPREG,
		.n		= sizeof(struct user_fpu_struct) / sizeof(long),
		.size		= sizeof(long),
		.align		= sizeof(long),
		.get		= fpregs_get,
		.set		= fpregs_set,
		.active		= fpregs_active,
	},
#endif

#ifdef CONFIG_SH_DSP
	[REGSET_DSP] = {
		.n		= sizeof(struct pt_dspregs) / sizeof(long),
		.size		= sizeof(long),
		.align		= sizeof(long),
		.get		= dspregs_get,
		.set		= dspregs_set,
		.active		= dspregs_active,
	},
#endif
};

static const struct user_regset_view user_sh_native_view = {
	.name		= "sh",
	.e_machine	= EM_SH,
	.regsets	= sh_regsets,
	.n		= ARRAY_SIZE(sh_regsets),
};

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	return &user_sh_native_view;
}

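/*
 * Architecture-specific ptrace requests: PEEKUSR/POKEUSR on the USER
 * area, plus the GET/SET variants for the general, FPU and DSP regsets.
 * Anything else is handed to the generic ptrace_request().
 */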
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	unsigned long __user *datap = (unsigned long __user *)data;
	int ret;

	switch (request) {
	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long tmp;

		ret = -EIO;
		if ((addr & 3) || addr < 0 ||
		    addr > sizeof(struct user) - 3)
			break;

		if (addr < sizeof(struct pt_regs))
			tmp = get_stack_long(child, addr);
		else if (addr >= offsetof(struct user, fpu) &&
			 addr < offsetof(struct user, u_fpvalid)) {
			if (!tsk_used_math(child)) {
				if (addr == offsetof(struct user, fpu.fpscr))
					tmp = FPSCR_INIT;
				else
					tmp = 0;
			} else {
				unsigned long index;
				index = addr - offsetof(struct user, fpu);
				tmp = ((unsigned long *)child->thread.xstate)
					[index >> 2];
			}
		} else if (addr == offsetof(struct user, u_fpvalid))
			tmp = !!tsk_used_math(child);
		else if (addr == PT_TEXT_ADDR)
			tmp = child->mm->start_code;
		else if (addr == PT_DATA_ADDR)
			tmp = child->mm->start_data;
		else if (addr == PT_TEXT_END_ADDR)
			tmp = child->mm->end_code;
		else if (addr == PT_TEXT_LEN)
			tmp = child->mm->end_code - child->mm->start_code;
		else
			tmp = 0;
		ret = put_user(tmp, datap);
		break;
	}

	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
		ret = -EIO;
		if ((addr & 3) || addr < 0 ||
		    addr > sizeof(struct user) - 3)
			break;

		if (addr < sizeof(struct pt_regs))
			ret = put_stack_long(child, addr, data);
		else if (addr >= offsetof(struct user, fpu) &&
			 addr < offsetof(struct user, u_fpvalid)) {
			unsigned long index;
			index = addr - offsetof(struct user, fpu);
			set_stopped_child_used_math(child);
			((unsigned long *)child->thread.xstate)
				[index >> 2] = data;
			ret = 0;
		} else if (addr == offsetof(struct user, u_fpvalid)) {
			conditional_stopped_child_used_math(data, child);
			ret = 0;
		}
		break;

	case PTRACE_GETREGS:
		return copy_regset_to_user(child, &user_sh_native_view,
					   REGSET_GENERAL,
					   0, sizeof(struct pt_regs),
					   datap);
	case PTRACE_SETREGS:
		return copy_regset_from_user(child, &user_sh_native_view,
					     REGSET_GENERAL,
					     0, sizeof(struct pt_regs),
					     datap);
#ifdef CONFIG_SH_FPU
	case PTRACE_GETFPREGS:
		return copy_regset_to_user(child, &user_sh_native_view,
					   REGSET_FPU,
					   0, sizeof(struct user_fpu_struct),
					   datap);
	case PTRACE_SETFPREGS:
		return copy_regset_from_user(child, &user_sh_native_view,
					     REGSET_FPU,
					     0, sizeof(struct user_fpu_struct),
					     datap);
#endif
#ifdef CONFIG_SH_DSP
	case PTRACE_GETDSPREGS:
		return copy_regset_to_user(child, &user_sh_native_view,
					   REGSET_DSP,
					   0, sizeof(struct pt_dspregs),
					   datap);
	case PTRACE_SETDSPREGS:
		return copy_regset_from_user(child, &user_sh_native_view,
					     REGSET_DSP,
					     0, sizeof(struct pt_dspregs),
					     datap);
#endif
	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}

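/*
 * Audit architecture identifier for this kernel: EM_SH, with the
 * little-endian flag OR'd in when CONFIG_CPU_LITTLE_ENDIAN is set.
 */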
static inline int audit_arch(void)
{
	int arch = EM_SH;

#ifdef CONFIG_CPU_LITTLE_ENDIAN
	arch |= __AUDIT_ARCH_LE;
#endif

	return arch;
}

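/*
 * Syscall entry tracing: run seccomp, let the tracer veto the call,
 * hit the syscall-entry tracepoint and record the audit entry.
 * Returns the syscall number to run, or -1 to force an ENOSYS error.
 */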
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
	long ret = 0;

	secure_computing(regs->regs[0]);

	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    tracehook_report_syscall_entry(regs))
		/*
		 * Tracing decided this syscall should not happen.
		 * We'll return a bogus call number to get an ENOSYS
		 * error, but leave the original number in regs->regs[0].
		 */
		ret = -1L;

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->regs[0]);

	if (unlikely(current->audit_context))
		audit_syscall_entry(audit_arch(), regs->regs[3],
				    regs->regs[4], regs->regs[5],
				    regs->regs[6], regs->regs[7]);

	return ret ?: regs->regs[0];
}

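/*
 * Syscall exit tracing: record the audit result, hit the syscall-exit
 * tracepoint and notify the tracer, flagging whether we were
 * single-stepping.
 */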
asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
{
	int step;

	if (unlikely(current->audit_context))
		audit_syscall_exit(AUDITSC_RESULT(regs->regs[0]),
				   regs->regs[0]);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->regs[0]);

	step = test_thread_flag(TIF_SINGLESTEP);
	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, step);
}