
/*
 *  linux/arch/arm/kernel/ptrace.c
 *
 *  By Ross Biro 1/23/92
 * edited by Linus Torvalds
 * ARM modifications Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/regset.h>

#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/traps.h>

#define REG_PC	15
#define REG_PSR	16

/*
 * does not yet catch signals sent when the child dies.
 * in exit.c or in signal.c.
 */

#if 0
/*
 * Breakpoint SWI instruction: SWI &9F0001
 */
#define BREAKINST_ARM	0xef9f0001
#define BREAKINST_THUMB	0xdf00		/* fill this in later */
#else
/*
 * New breakpoints - use an undefined instruction. The ARM architecture
 * reference manual guarantees that the following instruction space
 * will produce an undefined instruction exception on all CPUs:
 *
 *  ARM:   xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
 *  Thumb: 1101 1110 xxxx xxxx
 */
#define BREAKINST_ARM	0xe7f001f0
#define BREAKINST_THUMB	0xde01
#endif
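
/*
 * Illustrative sketch (user space, not part of this file): a debugger
 * typically plants one of the BREAKINST_* patterns above with the generic
 * text-poking ptrace requests, roughly like this, assuming a traced
 * ARM-mode child stopped at "addr":
 *
 *	long orig = ptrace(PTRACE_PEEKTEXT, pid, (void *)addr, NULL);
 *	ptrace(PTRACE_POKETEXT, pid, (void *)addr, (void *)BREAKINST_ARM);
 *	...continue the child, wait for it to stop...
 *	ptrace(PTRACE_POKETEXT, pid, (void *)addr, (void *)orig);
 *
 * When the child executes the undefined instruction, the undef hooks
 * registered further down turn it into a SIGTRAP for the tracer.
 */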

struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(r) \
	{.name = #r, .offset = offsetof(struct pt_regs, ARM_##r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}

static const struct pt_regs_offset regoffset_table[] = {
	REG_OFFSET_NAME(r0),
	REG_OFFSET_NAME(r1),
	REG_OFFSET_NAME(r2),
	REG_OFFSET_NAME(r3),
	REG_OFFSET_NAME(r4),
	REG_OFFSET_NAME(r5),
	REG_OFFSET_NAME(r6),
	REG_OFFSET_NAME(r7),
	REG_OFFSET_NAME(r8),
	REG_OFFSET_NAME(r9),
	REG_OFFSET_NAME(r10),
	REG_OFFSET_NAME(fp),
	REG_OFFSET_NAME(ip),
	REG_OFFSET_NAME(sp),
	REG_OFFSET_NAME(lr),
	REG_OFFSET_NAME(pc),
	REG_OFFSET_NAME(cpsr),
	REG_OFFSET_NAME(ORIG_r0),
	REG_OFFSET_END,
};

/**
 * regs_query_register_offset() - query register offset from its name
 * @name:	the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL;
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;

	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}

/**
 * regs_query_register_name() - query register name from its offset
 * @offset:	the offset of a register in struct pt_regs.
 *
 * regs_query_register_name() returns the name of a register from its
 * offset in struct pt_regs. If the @offset is invalid, this returns NULL;
 */
const char *regs_query_register_name(unsigned int offset)
{
	const struct pt_regs_offset *roff;

	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (roff->offset == offset)
			return roff->name;
	return NULL;
}
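
/*
 * Illustrative sketch: the two helpers above are each other's inverse over
 * regoffset_table, e.g.
 *
 *	int off = regs_query_register_offset("lr");
 *	-> offsetof(struct pt_regs, ARM_lr)
 *	regs_query_register_name(off);
 *	-> "lr"
 *
 * Callers such as the kprobe-based event tracing code can use the returned
 * offset to fetch a named register out of a struct pt_regs.
 */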

/**
 * regs_within_kernel_stack() - check the address in the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @addr:	address which is checked.
 *
 * regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
 * If @addr is within the kernel stack, it returns true. If not, returns false.
 */
bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
	return ((addr & ~(THREAD_SIZE - 1)) ==
		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
}

/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @n:		stack entry number.
 *
 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
 * is specified by @regs. If the @n th entry is NOT in the kernel stack,
 * this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);

	addr += n;
	if (regs_within_kernel_stack(regs, (unsigned long)addr))
		return *addr;
	else
		return 0;
}

/*
 * this routine will get a word off of the process's privileged stack.
 * the offset is how far from the base addr as stored in the THREAD.
 * this routine assumes that all the privileged stacks are in our
 * data space.
 */
static inline long get_user_reg(struct task_struct *task, int offset)
{
	return task_pt_regs(task)->uregs[offset];
}

/*
 * this routine will put a word on the process's privileged stack.
 * the offset is how far from the base addr as stored in the THREAD.
 * this routine assumes that all the privileged stacks are in our
 * data space.
 */
static inline int
put_user_reg(struct task_struct *task, int offset, long data)
{
	struct pt_regs newregs, *regs = task_pt_regs(task);
	int ret = -EINVAL;

	newregs = *regs;
	newregs.uregs[offset] = data;

	if (valid_user_regs(&newregs)) {
		regs->uregs[offset] = data;
		ret = 0;
	}

	return ret;
}

/*
 * Called by kernel/ptrace.c when detaching.
 */
void ptrace_disable(struct task_struct *child)
{
	/* Nothing to do. */
}

/*
 * Handle hitting a breakpoint.
 */
void ptrace_break(struct task_struct *tsk, struct pt_regs *regs)
{
	siginfo_t info;

	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code  = TRAP_BRKPT;
	info.si_addr  = (void __user *)instruction_pointer(regs);

	force_sig_info(SIGTRAP, &info, tsk);
}

static int break_trap(struct pt_regs *regs, unsigned int instr)
{
	ptrace_break(current, regs);
	return 0;
}

static struct undef_hook arm_break_hook = {
	.instr_mask	= 0x0fffffff,
	.instr_val	= 0x07f001f0,
	.cpsr_mask	= PSR_T_BIT,
	.cpsr_val	= 0,
	.fn		= break_trap,
};

static struct undef_hook thumb_break_hook = {
	.instr_mask	= 0xffff,
	.instr_val	= 0xde01,
	.cpsr_mask	= PSR_T_BIT,
	.cpsr_val	= PSR_T_BIT,
	.fn		= break_trap,
};

static struct undef_hook thumb2_break_hook = {
	.instr_mask	= 0xffffffff,
	.instr_val	= 0xf7f0a000,
	.cpsr_mask	= PSR_T_BIT,
	.cpsr_val	= PSR_T_BIT,
	.fn		= break_trap,
};

static int __init ptrace_break_init(void)
{
	register_undef_hook(&arm_break_hook);
	register_undef_hook(&thumb_break_hook);
	register_undef_hook(&thumb2_break_hook);
	return 0;
}

core_initcall(ptrace_break_init);
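
/*
 * The hooks above correspond to the breakpoint encodings defined earlier:
 * arm_break_hook matches BREAKINST_ARM with the condition field masked off
 * (mask 0x0fffffff), thumb_break_hook matches the 16-bit BREAKINST_THUMB,
 * and thumb2_break_hook matches a 32-bit Thumb-2 undefined-instruction
 * encoding so that 32-bit Thumb breakpoints trap as well. The CPSR T bit
 * selects whether the ARM or the Thumb hooks apply.
 */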

/*
 * Read the word at offset "off" into the "struct user". We
 * actually access the pt_regs stored on the kernel stack.
 */
static int ptrace_read_user(struct task_struct *tsk, unsigned long off,
			    unsigned long __user *ret)
{
	unsigned long tmp;

	if (off & 3 || off >= sizeof(struct user))
		return -EIO;

	tmp = 0;
	if (off == PT_TEXT_ADDR)
		tmp = tsk->mm->start_code;
	else if (off == PT_DATA_ADDR)
		tmp = tsk->mm->start_data;
	else if (off == PT_TEXT_END_ADDR)
		tmp = tsk->mm->end_code;
	else if (off < sizeof(struct pt_regs))
		tmp = get_user_reg(tsk, off >> 2);

	return put_user(tmp, ret);
}

/*
 * Write the word at offset "off" into "struct user". We
 * actually access the pt_regs stored on the kernel stack.
 */
static int ptrace_write_user(struct task_struct *tsk, unsigned long off,
			     unsigned long val)
{
	if (off & 3 || off >= sizeof(struct user))
		return -EIO;

	if (off >= sizeof(struct pt_regs))
		return 0;

	return put_user_reg(tsk, off >> 2, val);
}
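
/*
 * Illustrative sketch (user space, not part of this file): because register
 * n lives at byte offset n * 4 of the pt_regs area in struct user, a tracer
 * can read the stopped child's PC (register 15) with the generic PEEKUSR
 * request:
 *
 *	errno = 0;
 *	long pc = ptrace(PTRACE_PEEKUSR, pid, (void *)(15 << 2), NULL);
 *	if (pc == -1 && errno)
 *		...handle error...
 *
 * PT_TEXT_ADDR, PT_DATA_ADDR and PT_TEXT_END_ADDR are the extra pseudo
 * offsets handled above for reading the text/data segment boundaries.
 */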

#ifdef CONFIG_IWMMXT

/*
 * Get the child iWMMXt state.
 */
static int ptrace_getwmmxregs(struct task_struct *tsk, void __user *ufp)
{
	struct thread_info *thread = task_thread_info(tsk);

	if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT))
		return -ENODATA;
	iwmmxt_task_disable(thread);  /* force it to ram */
	return copy_to_user(ufp, &thread->fpstate.iwmmxt, IWMMXT_SIZE)
		? -EFAULT : 0;
}

/*
 * Set the child iWMMXt state.
 */
static int ptrace_setwmmxregs(struct task_struct *tsk, void __user *ufp)
{
	struct thread_info *thread = task_thread_info(tsk);

	if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT))
		return -EACCES;
	iwmmxt_task_release(thread);  /* force a reload */
	return copy_from_user(&thread->fpstate.iwmmxt, ufp, IWMMXT_SIZE)
		? -EFAULT : 0;
}

#endif

#ifdef CONFIG_CRUNCH
/*
 * Get the child Crunch state.
 */
static int ptrace_getcrunchregs(struct task_struct *tsk, void __user *ufp)
{
	struct thread_info *thread = task_thread_info(tsk);

	crunch_task_disable(thread);  /* force it to ram */
	return copy_to_user(ufp, &thread->crunchstate, CRUNCH_SIZE)
		? -EFAULT : 0;
}

/*
 * Set the child Crunch state.
 */
static int ptrace_setcrunchregs(struct task_struct *tsk, void __user *ufp)
{
	struct thread_info *thread = task_thread_info(tsk);

	crunch_task_release(thread);  /* force a reload */
	return copy_from_user(&thread->crunchstate, ufp, CRUNCH_SIZE)
		? -EFAULT : 0;
}
#endif

#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
 * Convert a virtual register number into an index for a thread_info
 * breakpoint array. Breakpoints are identified using positive numbers
 * whilst watchpoints are negative. The registers are laid out as pairs
 * of (address, control), each pair mapping to a unique hw_breakpoint struct.
 * Register 0 is reserved for describing resource information.
 */
static int ptrace_hbp_num_to_idx(long num)
{
	if (num < 0)
		num = (ARM_MAX_BRP << 1) - num;
	return (num - 1) >> 1;
}

/*
 * Returns the virtual register number for the address of the
 * breakpoint at index idx.
 */
static long ptrace_hbp_idx_to_num(int idx)
{
	long mid = ARM_MAX_BRP << 1;
	long num = (idx << 1) + 1;
	return num > mid ? mid - num : num;
}
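
/*
 * Worked example of the numbering above (illustrative only; the real
 * ARM_MAX_BRP value comes from the debug architecture): with ARM_MAX_BRP
 * == 6, breakpoint 1 uses virtual registers 1 (address) and 2 (control),
 * both mapping to slot index 0; breakpoint 6 uses registers 11/12 and maps
 * to index 5; watchpoint 1 uses registers -1/-2 and maps to index 6, and so
 * on. Register 0 is the read-only resource-information word handled in
 * ptrace_gethbpregs().
 */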

/*
 * Handle hitting a HW-breakpoint.
 */
static void ptrace_hbptriggered(struct perf_event *bp,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
	long num;
	int i;
	siginfo_t info;

	for (i = 0; i < ARM_MAX_HBP_SLOTS; ++i)
		if (current->thread.debug.hbp[i] == bp)
			break;

	num = (i == ARM_MAX_HBP_SLOTS) ? 0 : ptrace_hbp_idx_to_num(i);

	info.si_signo	= SIGTRAP;
	info.si_errno	= (int)num;
	info.si_code	= TRAP_HWBKPT;
	info.si_addr	= (void __user *)(bkpt->trigger);

	force_sig_info(SIGTRAP, &info, current);
}

/*
 * Set ptrace breakpoint pointers to zero for this task.
 * This is required in order to prevent child processes from unregistering
 * breakpoints held by their parent.
 */
void clear_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	memset(tsk->thread.debug.hbp, 0, sizeof(tsk->thread.debug.hbp));
}

/*
 * Unregister breakpoints from this task and reset the pointers in
 * the thread_struct.
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < ARM_MAX_HBP_SLOTS; i++) {
		if (t->debug.hbp[i]) {
			unregister_hw_breakpoint(t->debug.hbp[i]);
			t->debug.hbp[i] = NULL;
		}
	}
}

static u32 ptrace_get_hbp_resource_info(void)
{
	u8 num_brps, num_wrps, debug_arch, wp_len;
	u32 reg = 0;

	num_brps	= hw_breakpoint_slots(TYPE_INST);
	num_wrps	= hw_breakpoint_slots(TYPE_DATA);
	debug_arch	= arch_get_debug_arch();
	wp_len		= arch_get_max_wp_len();

	reg		|= debug_arch;
	reg		<<= 8;
	reg		|= wp_len;
	reg		<<= 8;
	reg		|= num_wrps;
	reg		<<= 8;
	reg		|= num_brps;

	return reg;
}
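
/*
 * The resource-information word built above is packed, most significant
 * byte first, as:
 *
 *	[31:24]	debug architecture version
 *	[23:16]	maximum watchpoint length
 *	[15:8]	number of watchpoint slots
 *	[7:0]	number of breakpoint slots
 *
 * It is returned to the tracer for virtual register 0 (see below).
 */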

static struct perf_event *ptrace_hbp_create(struct task_struct *tsk, int type)
{
	struct perf_event_attr attr;

	ptrace_breakpoint_init(&attr);

	/* Initialise fields to sane defaults. */
	attr.bp_addr	= 0;
	attr.bp_len	= HW_BREAKPOINT_LEN_4;
	attr.bp_type	= type;
	attr.disabled	= 1;

	return register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL,
					   tsk);
}

static int ptrace_gethbpregs(struct task_struct *tsk, long num,
			     unsigned long __user *data)
{
	u32 reg;
	int idx, ret = 0;
	struct perf_event *bp;
	struct arch_hw_breakpoint_ctrl arch_ctrl;

	if (num == 0) {
		reg = ptrace_get_hbp_resource_info();
	} else {
		idx = ptrace_hbp_num_to_idx(num);
		if (idx < 0 || idx >= ARM_MAX_HBP_SLOTS) {
			ret = -EINVAL;
			goto out;
		}

		bp = tsk->thread.debug.hbp[idx];
		if (!bp) {
			reg = 0;
			goto put;
		}

		arch_ctrl = counter_arch_bp(bp)->ctrl;

		/*
		 * Fix up the len because we may have adjusted it
		 * to compensate for an unaligned address.
		 */
		while (!(arch_ctrl.len & 0x1))
			arch_ctrl.len >>= 1;

		if (num & 0x1)
			reg = bp->attr.bp_addr;
		else
			reg = encode_ctrl_reg(arch_ctrl);
	}

put:
	if (put_user(reg, data))
		ret = -EFAULT;

out:
	return ret;
}

static int ptrace_sethbpregs(struct task_struct *tsk, long num,
			     unsigned long __user *data)
{
	int idx, gen_len, gen_type, implied_type, ret = 0;
	u32 user_val;
	struct perf_event *bp;
	struct arch_hw_breakpoint_ctrl ctrl;
	struct perf_event_attr attr;

	if (num == 0)
		goto out;
	else if (num < 0)
		implied_type = HW_BREAKPOINT_RW;
	else
		implied_type = HW_BREAKPOINT_X;

	idx = ptrace_hbp_num_to_idx(num);
	if (idx < 0 || idx >= ARM_MAX_HBP_SLOTS) {
		ret = -EINVAL;
		goto out;
	}

	if (get_user(user_val, data)) {
		ret = -EFAULT;
		goto out;
	}

	bp = tsk->thread.debug.hbp[idx];
	if (!bp) {
		bp = ptrace_hbp_create(tsk, implied_type);
		if (IS_ERR(bp)) {
			ret = PTR_ERR(bp);
			goto out;
		}
		tsk->thread.debug.hbp[idx] = bp;
	}

	attr = bp->attr;

	if (num & 0x1) {
		/* Address */
		attr.bp_addr	= user_val;
	} else {
		/* Control */
		decode_ctrl_reg(user_val, &ctrl);
		ret = arch_bp_generic_fields(ctrl, &gen_len, &gen_type);
		if (ret)
			goto out;

		if ((gen_type & implied_type) != gen_type) {
			ret = -EINVAL;
			goto out;
		}

		attr.bp_len	= gen_len;
		attr.bp_type	= gen_type;
		attr.disabled	= !ctrl.enabled;
	}

	ret = modify_user_hw_breakpoint(bp, &attr);
out:
	return ret;
}
#endif
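
/*
 * Illustrative sketch (user space): a debugger usually starts by reading
 * virtual register 0 to discover what the hardware offers, e.g.
 *
 *	unsigned long info;
 *	ptrace(PTRACE_GETHBPREGS, pid, (void *)0, &info);
 *	int num_brps = info & 0xff;
 *	int num_wrps = (info >> 8) & 0xff;
 *
 * and then programs address/control pairs through PTRACE_SETHBPREGS using
 * the positive (breakpoint) or negative (watchpoint) register numbers
 * described earlier. This is only a sketch of the ABI implemented above.
 */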

/* regset get/set implementations */

static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   regs,
				   0, sizeof(*regs));
}

static int gpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct pt_regs newregs;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &newregs,
				 0, sizeof(newregs));
	if (ret)
		return ret;

	if (!valid_user_regs(&newregs))
		return -EINVAL;

	*task_pt_regs(target) = newregs;
	return 0;
}

static int fpa_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &task_thread_info(target)->fpstate,
				   0, sizeof(struct user_fp));
}

static int fpa_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	struct thread_info *thread = task_thread_info(target);

	thread->used_cp[1] = thread->used_cp[2] = 1;

	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &thread->fpstate,
				  0, sizeof(struct user_fp));
}

#ifdef CONFIG_VFP
/*
 * VFP register get/set implementations.
 *
 * With respect to the kernel, struct user_fp is divided into three chunks:
 *
 * 16 or 32 real VFP registers (d0-d15 or d0-31)
 *	These are transferred to/from the real registers in the task's
 *	vfp_hard_struct. The number of registers depends on the kernel
 *	configuration.
 *
 * 16 or 0 fake VFP registers (d16-d31 or empty)
 *	i.e., the user_vfp structure has space for 32 registers even if
 *	the kernel doesn't have them all.
 *
 *	vfp_get() reads this chunk as zero where applicable
 *	vfp_set() ignores this chunk
 *
 * 1 word for the FPSCR
 *
 * The bounds-checking logic built into user_regset_copyout and friends
 * means that we can make a simple sequence of calls to map the relevant data
 * to/from the specified slice of the user regset structure.
 */
static int vfp_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	int ret;
	struct thread_info *thread = task_thread_info(target);
	struct vfp_hard_struct const *vfp = &thread->vfpstate.hard;
	const size_t user_fpregs_offset = offsetof(struct user_vfp, fpregs);
	const size_t user_fpscr_offset = offsetof(struct user_vfp, fpscr);

	vfp_sync_hwstate(thread);

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &vfp->fpregs,
				  user_fpregs_offset,
				  user_fpregs_offset + sizeof(vfp->fpregs));
	if (ret)
		return ret;

	ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
				       user_fpregs_offset + sizeof(vfp->fpregs),
				       user_fpscr_offset);
	if (ret)
		return ret;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &vfp->fpscr,
				   user_fpscr_offset,
				   user_fpscr_offset + sizeof(vfp->fpscr));
}

/*
 * For vfp_set() a read-modify-write is done on the VFP registers,
 * in order to avoid writing back a half-modified set of registers on
 * failure.
 */
static int vfp_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct thread_info *thread = task_thread_info(target);
	struct vfp_hard_struct new_vfp = thread->vfpstate.hard;
	const size_t user_fpregs_offset = offsetof(struct user_vfp, fpregs);
	const size_t user_fpscr_offset = offsetof(struct user_vfp, fpscr);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &new_vfp.fpregs,
				 user_fpregs_offset,
				 user_fpregs_offset + sizeof(new_vfp.fpregs));
	if (ret)
		return ret;

	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					user_fpregs_offset + sizeof(new_vfp.fpregs),
					user_fpscr_offset);
	if (ret)
		return ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &new_vfp.fpscr,
				 user_fpscr_offset,
				 user_fpscr_offset + sizeof(new_vfp.fpscr));
	if (ret)
		return ret;

	vfp_sync_hwstate(thread);
	thread->vfpstate.hard = new_vfp;
	vfp_flush_hwstate(thread);

	return 0;
}
#endif /* CONFIG_VFP */
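
/*
 * Illustrative sketch (not authoritative; see struct user_vfp in the ARM
 * user/ptrace headers): the offsets used above imply that a tracer reading
 * this regset, e.g. via PTRACE_GETVFPREGS, sees the register images for
 * d0-d31 first (with the upper sixteen read back as zero when the hardware
 * only implements d0-d15), followed by a single word holding the FPSCR.
 */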

enum arm_regset {
	REGSET_GPR,
	REGSET_FPR,
#ifdef CONFIG_VFP
	REGSET_VFP,
#endif
};

static const struct user_regset arm_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = ELF_NGREG,
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = gpr_get,
		.set = gpr_set
	},
	[REGSET_FPR] = {
		/*
		 * For the FPA regs in fpstate, the real fields are a mixture
		 * of sizes, so pretend that the registers are word-sized:
		 */
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_fp) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = fpa_get,
		.set = fpa_set
	},
#ifdef CONFIG_VFP
	[REGSET_VFP] = {
		/*
		 * Pretend that the VFP regs are word-sized, since the FPSCR is
		 * a single word dangling at the end of struct user_vfp:
		 */
		.core_note_type = NT_ARM_VFP,
		.n = ARM_VFPREGS_SIZE / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = vfp_get,
		.set = vfp_set
	},
#endif /* CONFIG_VFP */
};

static const struct user_regset_view user_arm_view = {
	.name = "arm", .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
	.regsets = arm_regsets, .n = ARRAY_SIZE(arm_regsets)
};

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	return &user_arm_view;
}

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret;
	unsigned long __user *datap = (unsigned long __user *) data;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = ptrace_read_user(child, addr, datap);
		break;

	case PTRACE_POKEUSR:
		ret = ptrace_write_user(child, addr, data);
		break;

	case PTRACE_GETREGS:
		ret = copy_regset_to_user(child,
					  &user_arm_view, REGSET_GPR,
					  0, sizeof(struct pt_regs),
					  datap);
		break;

	case PTRACE_SETREGS:
		ret = copy_regset_from_user(child,
					    &user_arm_view, REGSET_GPR,
					    0, sizeof(struct pt_regs),
					    datap);
		break;

	case PTRACE_GETFPREGS:
		ret = copy_regset_to_user(child,
					  &user_arm_view, REGSET_FPR,
					  0, sizeof(union fp_state),
					  datap);
		break;

	case PTRACE_SETFPREGS:
		ret = copy_regset_from_user(child,
					    &user_arm_view, REGSET_FPR,
					    0, sizeof(union fp_state),
					    datap);
		break;

#ifdef CONFIG_IWMMXT
	case PTRACE_GETWMMXREGS:
		ret = ptrace_getwmmxregs(child, datap);
		break;

	case PTRACE_SETWMMXREGS:
		ret = ptrace_setwmmxregs(child, datap);
		break;
#endif

	case PTRACE_GET_THREAD_AREA:
		ret = put_user(task_thread_info(child)->tp_value,
			       datap);
		break;

	case PTRACE_SET_SYSCALL:
		task_thread_info(child)->syscall = data;
		ret = 0;
		break;

#ifdef CONFIG_CRUNCH
	case PTRACE_GETCRUNCHREGS:
		ret = ptrace_getcrunchregs(child, datap);
		break;

	case PTRACE_SETCRUNCHREGS:
		ret = ptrace_setcrunchregs(child, datap);
		break;
#endif

#ifdef CONFIG_VFP
	case PTRACE_GETVFPREGS:
		ret = copy_regset_to_user(child,
					  &user_arm_view, REGSET_VFP,
					  0, ARM_VFPREGS_SIZE,
					  datap);
		break;

	case PTRACE_SETVFPREGS:
		ret = copy_regset_from_user(child,
					    &user_arm_view, REGSET_VFP,
					    0, ARM_VFPREGS_SIZE,
					    datap);
		break;
#endif

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	case PTRACE_GETHBPREGS:
		if (ptrace_get_breakpoints(child) < 0)
			return -ESRCH;

		ret = ptrace_gethbpregs(child, addr,
					(unsigned long __user *)data);
		ptrace_put_breakpoints(child);
		break;

	case PTRACE_SETHBPREGS:
		if (ptrace_get_breakpoints(child) < 0)
			return -ESRCH;

		ret = ptrace_sethbpregs(child, addr,
					(unsigned long __user *)data);
		ptrace_put_breakpoints(child);
		break;
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
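
/*
 * Illustrative sketch (user space): the regset-backed requests above map
 * onto the usual ptrace calls. For example, fetching the full
 * general-purpose register file of a stopped child, assuming glibc's
 * struct user_regs from <sys/user.h> on ARM:
 *
 *	struct user_regs regs;
 *	ptrace(PTRACE_GETREGS, pid, NULL, &regs);
 *
 * The data argument points at the pt_regs-sized image copied out by
 * copy_regset_to_user(); error checking is omitted here for brevity.
 */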

asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
{
	unsigned long ip;

	if (!test_thread_flag(TIF_SYSCALL_TRACE))
		return scno;
	if (!(current->ptrace & PT_PTRACED))
		return scno;

	/*
	 * Save IP. IP is used to denote syscall entry/exit:
	 * IP = 0 -> entry, = 1 -> exit
	 */
	ip = regs->ARM_ip;
	regs->ARM_ip = why;

	current_thread_info()->syscall = scno;

	/* the 0x80 provides a way for the tracing parent to distinguish
	   between a syscall stop and SIGTRAP delivery */
	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
				 ? 0x80 : 0));
	/*
	 * this isn't the same as continuing with a signal, but it will do
	 * for normal use. strace only continues with a signal if the
	 * stopping signal is not SIGTRAP. -brl
	 */
	if (current->exit_code) {
		send_sig(current->exit_code, current, 1);
		current->exit_code = 0;
	}

	regs->ARM_ip = ip;

	return current_thread_info()->syscall;
}
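
/*
 * Illustrative sketch (user space): because syscall_trace() temporarily
 * stores "why" in the child's ip (r12) around the syscall stop, a tracer
 * using PTRACE_SYSCALL can tell entry from exit by peeking that register
 * while the child is stopped:
 *
 *	long why = ptrace(PTRACE_PEEKUSR, pid, (void *)(12 << 2), NULL);
 *	(why == 0 means syscall entry, why == 1 means syscall exit)
 *
 * This is only a sketch of the convention documented above, which is how
 * tracers have traditionally distinguished the two stops on ARM.
 */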