/* ptrace.c: Sparc process tracing support.
 *
 * Copyright (C) 1996, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 *
 * Based upon code written by Ross Biro, Linus Torvalds, Bob Manson,
 * and David Mosberger.
 *
 * Added Linux support -miguel (weird, eh?, the original code was meant
 * to emulate SunOS).
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/security.h>
#include <linux/seccomp.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/regset.h>
#include <linux/compat.h>
#include <linux/elf.h>

#include <asm/asi.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/psrcompat.h>
#include <asm/visasm.h>
#include <asm/spitfire.h>
#include <asm/page.h>
#include <asm/cpudata.h>
#include <asm/cacheflush.h>

#include "entry.h"

/* #define ALLOW_INIT_TRACING */
/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
	/* nothing to do */
}
/* To get the necessary page struct, access_process_vm() first calls
 * get_user_pages().  This has done a flush_dcache_page() on the
 * accessed page.  Then our caller (copy_{to,from}_user_page()) did
 * a memcpy to read/write the data from that page.
 *
 * Now, the only thing we have to do is:
 * 1) flush the D-cache if it's possible that an illegal alias
 *    has been created
 * 2) flush the I-cache if this is pre-cheetah and we did a write
 */
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			 unsigned long uaddr, void *kaddr,
			 unsigned long len, int write)
{
	BUG_ON(len > PAGE_SIZE);

	if (tlb_type == hypervisor)
		return;

	preempt_disable();

#ifdef DCACHE_ALIASING_POSSIBLE
	/* If bit 13 of the kernel address we used to access the
	 * user page is the same as the virtual address that page
	 * is mapped to in the user's address space, we can skip the
	 * D-cache flush.
	 */
	if ((uaddr ^ (unsigned long) kaddr) & (1UL << 13)) {
		unsigned long start = __pa(kaddr);
		unsigned long end = start + len;
		unsigned long dcache_line_size;

		dcache_line_size = local_cpu_data().dcache_line_size;

		if (tlb_type == spitfire) {
			for (; start < end; start += dcache_line_size)
				spitfire_put_dcache_tag(start & 0x3fe0, 0x0);
		} else {
			start &= ~(dcache_line_size - 1);
			for (; start < end; start += dcache_line_size)
				__asm__ __volatile__(
					"stxa %%g0, [%0] %1\n\t"
					"membar #Sync"
					: /* no outputs */
					: "r" (start),
					  "i" (ASI_DCACHE_INVALIDATE));
		}
	}
#endif
	if (write && tlb_type == spitfire) {
		unsigned long start = (unsigned long) kaddr;
		unsigned long end = start + len;
		unsigned long icache_line_size;

		icache_line_size = local_cpu_data().icache_line_size;

		for (; start < end; start += icache_line_size)
			flushi(start);
	}
	preempt_enable();
}
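
/* Illustration (not part of the original file, guarded out of the build):
 * a hypothetical helper restating the alias test used above.  On sun4u the
 * D-cache is virtually indexed and larger than the 8K base page size, so a
 * user mapping and the kernel mapping of the same page can only land in
 * different cache lines when bit 13 of the two virtual addresses differs;
 * when the bits match, the earlier flush_dcache_page() already covered us.
 */
#if 0
static inline int ptrace_dcache_alias_possible(unsigned long uaddr,
					       const void *kaddr)
{
	/* Differing page colour (bit 13) means the two mappings may sit in
	 * different D-cache lines, so a flush is required.
	 */
	return ((uaddr ^ (unsigned long) kaddr) & (1UL << 13)) != 0;
}
#endif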

enum sparc_regset {
	REGSET_GENERAL,
	REGSET_FP,
};

static int genregs64_get(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 void *kbuf, void __user *ubuf)
{
	const struct pt_regs *regs = task_pt_regs(target);
	int ret;

	if (target == current)
		flushw_user();

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  regs->u_regs,
				  0, 16 * sizeof(u64));
	if (!ret) {
		unsigned long __user *reg_window = (unsigned long __user *)
			(regs->u_regs[UREG_I6] + STACK_BIAS);
		unsigned long window[16];

		if (copy_from_user(window, reg_window, sizeof(window)))
			return -EFAULT;

		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  window,
					  16 * sizeof(u64),
					  32 * sizeof(u64));
	}

	if (!ret) {
		/* TSTATE, TPC, TNPC */
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &regs->tstate,
					  32 * sizeof(u64),
					  35 * sizeof(u64));
	}

	if (!ret) {
		unsigned long y = regs->y;

		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &y,
					  35 * sizeof(u64),
					  36 * sizeof(u64));
	}

	if (!ret)
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       36 * sizeof(u64), -1);

	return ret;
}

static int genregs64_set(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	int ret;

	if (target == current)
		flushw_user();

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 regs->u_regs,
				 0, 16 * sizeof(u64));
	if (!ret && count > 0) {
		unsigned long __user *reg_window = (unsigned long __user *)
			(regs->u_regs[UREG_I6] + STACK_BIAS);
		unsigned long window[16];

		if (copy_from_user(window, reg_window, sizeof(window)))
			return -EFAULT;

		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 window,
					 16 * sizeof(u64),
					 32 * sizeof(u64));
		if (!ret &&
		    copy_to_user(reg_window, window, sizeof(window)))
			return -EFAULT;
	}

	if (!ret && count > 0) {
		unsigned long tstate;

		/* TSTATE */
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &tstate,
					 32 * sizeof(u64),
					 33 * sizeof(u64));
		if (!ret) {
			/* Only the condition codes can be modified
			 * in the %tstate register.
			 */
			tstate &= (TSTATE_ICC | TSTATE_XCC);
			regs->tstate &= ~(TSTATE_ICC | TSTATE_XCC);
			regs->tstate |= tstate;
		}
	}

	if (!ret) {
		/* TPC, TNPC */
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &regs->tpc,
					 33 * sizeof(u64),
					 35 * sizeof(u64));
	}

	if (!ret) {
		unsigned long y;

		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &y,
					 35 * sizeof(u64),
					 36 * sizeof(u64));
		if (!ret)
			regs->y = y;
	}

	if (!ret)
		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						36 * sizeof(u64), -1);

	return ret;
}

static int fpregs64_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	const unsigned long *fpregs = task_thread_info(target)->fpregs;
	unsigned long fprs, fsr, gsr;
	int ret;

	if (target == current)
		save_and_clear_fpu();

	fprs = task_thread_info(target)->fpsaved[0];

	if (fprs & FPRS_DL)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  fpregs,
					  0, 16 * sizeof(u64));
	else
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       0,
					       16 * sizeof(u64));

	if (!ret) {
		if (fprs & FPRS_DU)
			ret = user_regset_copyout(&pos, &count,
						  &kbuf, &ubuf,
						  fpregs + 16,
						  16 * sizeof(u64),
						  32 * sizeof(u64));
		else
			ret = user_regset_copyout_zero(&pos, &count,
						       &kbuf, &ubuf,
						       16 * sizeof(u64),
						       32 * sizeof(u64));
	}

	if (fprs & FPRS_FEF) {
		fsr = task_thread_info(target)->xfsr[0];
		gsr = task_thread_info(target)->gsr[0];
	} else {
		fsr = gsr = 0;
	}

	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &fsr,
					  32 * sizeof(u64),
					  33 * sizeof(u64));
	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &gsr,
					  33 * sizeof(u64),
					  34 * sizeof(u64));
	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &fprs,
					  34 * sizeof(u64),
					  35 * sizeof(u64));

	if (!ret)
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       35 * sizeof(u64), -1);

	return ret;
}

static int fpregs64_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	unsigned long *fpregs = task_thread_info(target)->fpregs;
	unsigned long fprs;
	int ret;

	if (target == current)
		save_and_clear_fpu();

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 fpregs,
				 0, 32 * sizeof(u64));
	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 task_thread_info(target)->xfsr,
					 32 * sizeof(u64),
					 33 * sizeof(u64));
	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 task_thread_info(target)->gsr,
					 33 * sizeof(u64),
					 34 * sizeof(u64));

	fprs = task_thread_info(target)->fpsaved[0];
	if (!ret && count > 0) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &fprs,
					 34 * sizeof(u64),
					 35 * sizeof(u64));
	}

	fprs |= (FPRS_FEF | FPRS_DL | FPRS_DU);
	task_thread_info(target)->fpsaved[0] = fprs;

	if (!ret)
		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						35 * sizeof(u64), -1);
	return ret;
}

static const struct user_regset sparc64_regsets[] = {
	/* Format is:
	 *	G0 --> G7
	 *	O0 --> O7
	 *	L0 --> L7
	 *	I0 --> I7
	 *	TSTATE, TPC, TNPC, Y
	 */
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = 36,
		.size = sizeof(u64), .align = sizeof(u64),
		.get = genregs64_get, .set = genregs64_set
	},
	/* Format is:
	 *	F0 --> F63
	 *	FSR
	 *	GSR
	 *	FPRS
	 */
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = 35,
		.size = sizeof(u64), .align = sizeof(u64),
		.get = fpregs64_get, .set = fpregs64_set
	},
};

static const struct user_regset_view user_sparc64_view = {
	.name = "sparc64", .e_machine = EM_SPARCV9,
	.regsets = sparc64_regsets, .n = ARRAY_SIZE(sparc64_regsets)
};
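
/* Illustration (not part of the original file, guarded out of the build):
 * a hypothetical tracer-side sketch showing how this view is laid out when
 * read from user space with the generic PTRACE_GETREGSET request, on kernels
 * that provide it (the request post-dates this file).  The regset is selected
 * by its core note type, NT_PRSTATUS for REGSET_GENERAL above; the tracee is
 * assumed to be stopped under ptrace, and this builds as a user-space
 * program, not in the kernel.
 */
#if 0
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <elf.h>
#include <stdint.h>

static long read_genregs64(pid_t pid, uint64_t regs[36])
{
	struct iovec iov = {
		.iov_base = regs,
		.iov_len  = 36 * sizeof(uint64_t),
	};

	/* Slots 0-15: %g0-%g7/%o0-%o7, 16-31: register window (%l/%i),
	 * 32-35: tstate, tpc, tnpc, y -- matching genregs64_get() above.
	 */
	return ptrace(PTRACE_GETREGSET, pid, (void *) NT_PRSTATUS, &iov);
}
#endif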

#ifdef CONFIG_COMPAT
static int genregs32_get(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 void *kbuf, void __user *ubuf)
{
	const struct pt_regs *regs = task_pt_regs(target);
	compat_ulong_t __user *reg_window;
	compat_ulong_t *k = kbuf;
	compat_ulong_t __user *u = ubuf;
	compat_ulong_t reg;

	if (target == current)
		flushw_user();

	pos /= sizeof(reg);
	count /= sizeof(reg);

	if (kbuf) {
		for (; count > 0 && pos < 16; count--)
			*k++ = regs->u_regs[pos++];

		reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
		for (; count > 0 && pos < 32; count--) {
			if (get_user(*k++, &reg_window[pos++]))
				return -EFAULT;
		}
	} else {
		for (; count > 0 && pos < 16; count--) {
			if (put_user((compat_ulong_t) regs->u_regs[pos++], u++))
				return -EFAULT;
		}

		reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
		for (; count > 0 && pos < 32; count--) {
			if (get_user(reg, &reg_window[pos++]) ||
			    put_user(reg, u++))
				return -EFAULT;
		}
	}

	while (count > 0) {
		switch (pos) {
		case 32: /* PSR */
			reg = tstate_to_psr(regs->tstate);
			break;
		case 33: /* PC */
			reg = regs->tpc;
			break;
		case 34: /* NPC */
			reg = regs->tnpc;
			break;
		case 35: /* Y */
			reg = regs->y;
			break;
		case 36: /* WIM */
		case 37: /* TBR */
			reg = 0;
			break;
		default:
			goto finish;
		}

		if (kbuf)
			*k++ = reg;
		else if (put_user(reg, u++))
			return -EFAULT;
		pos++;
		count--;
	}
finish:
	pos *= sizeof(reg);
	count *= sizeof(reg);

	return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					38 * sizeof(reg), -1);
}

static int genregs32_set(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	compat_ulong_t __user *reg_window;
	const compat_ulong_t *k = kbuf;
	const compat_ulong_t __user *u = ubuf;
	compat_ulong_t reg;

	if (target == current)
		flushw_user();

	pos /= sizeof(reg);
	count /= sizeof(reg);

	if (kbuf) {
		for (; count > 0 && pos < 16; count--)
			regs->u_regs[pos++] = *k++;

		reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
		for (; count > 0 && pos < 32; count--) {
			if (put_user(*k++, &reg_window[pos++]))
				return -EFAULT;
		}
	} else {
		for (; count > 0 && pos < 16; count--) {
			if (get_user(reg, u++))
				return -EFAULT;
			regs->u_regs[pos++] = reg;
		}

		reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
		for (; count > 0 && pos < 32; count--) {
			if (get_user(reg, u++) ||
			    put_user(reg, &reg_window[pos++]))
				return -EFAULT;
		}
	}

	while (count > 0) {
		unsigned long tstate;

		if (kbuf)
			reg = *k++;
		else if (get_user(reg, u++))
			return -EFAULT;

		switch (pos) {
		case 32: /* PSR */
			tstate = regs->tstate;
			tstate &= ~(TSTATE_ICC | TSTATE_XCC);
			tstate |= psr_to_tstate_icc(reg);
			regs->tstate = tstate;
			break;
		case 33: /* PC */
			regs->tpc = reg;
			break;
		case 34: /* NPC */
			regs->tnpc = reg;
			break;
		case 35: /* Y */
			regs->y = reg;
			break;
		case 36: /* WIM */
		case 37: /* TBR */
			break;
		default:
			goto finish;
		}

		pos++;
		count--;
	}
finish:
	pos *= sizeof(reg);
	count *= sizeof(reg);

	return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					 38 * sizeof(reg), -1);
}

static int fpregs32_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	const unsigned long *fpregs = task_thread_info(target)->fpregs;
	compat_ulong_t enabled;
	unsigned long fprs;
	compat_ulong_t fsr;
	int ret = 0;

	if (target == current)
		save_and_clear_fpu();

	fprs = task_thread_info(target)->fpsaved[0];
	if (fprs & FPRS_FEF) {
		fsr = task_thread_info(target)->xfsr[0];
		enabled = 1;
	} else {
		fsr = 0;
		enabled = 0;
	}

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  fpregs,
				  0, 32 * sizeof(u32));

	if (!ret)
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       32 * sizeof(u32),
					       33 * sizeof(u32));
	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &fsr,
					  33 * sizeof(u32),
					  34 * sizeof(u32));

	if (!ret) {
		compat_ulong_t val;

		val = (enabled << 8) | (8 << 16);
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &val,
					  34 * sizeof(u32),
					  35 * sizeof(u32));
	}

	if (!ret)
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       35 * sizeof(u32), -1);

	return ret;
}

static int fpregs32_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	unsigned long *fpregs = task_thread_info(target)->fpregs;
	unsigned long fprs;
	int ret;

	if (target == current)
		save_and_clear_fpu();

	fprs = task_thread_info(target)->fpsaved[0];

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 fpregs,
				 0, 32 * sizeof(u32));
	if (!ret)
		user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					  32 * sizeof(u32),
					  33 * sizeof(u32));
	if (!ret && count > 0) {
		compat_ulong_t fsr;
		unsigned long val;

		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &fsr,
					 33 * sizeof(u32),
					 34 * sizeof(u32));
		if (!ret) {
			val = task_thread_info(target)->xfsr[0];
			val &= 0xffffffff00000000UL;
			val |= fsr;
			task_thread_info(target)->xfsr[0] = val;
		}
	}

	fprs |= (FPRS_FEF | FPRS_DL);
	task_thread_info(target)->fpsaved[0] = fprs;

	if (!ret)
		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						34 * sizeof(u32), -1);
	return ret;
}

static const struct user_regset sparc32_regsets[] = {
	/* Format is:
	 *	G0 --> G7
	 *	O0 --> O7
	 *	L0 --> L7
	 *	I0 --> I7
	 *	PSR, PC, nPC, Y, WIM, TBR
	 */
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = 38,
		.size = sizeof(u32), .align = sizeof(u32),
		.get = genregs32_get, .set = genregs32_set
	},
	/* Format is:
	 *	F0 --> F31
	 *	empty 32-bit word
	 *	FSR (32-bit word)
	 *	FPU QUEUE COUNT (8-bit char)
	 *	FPU QUEUE ENTRYSIZE (8-bit char)
	 *	FPU ENABLED (8-bit char)
	 *	empty 8-bit char
	 *	FPU QUEUE (64 32-bit ints)
	 */
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = 99,
		.size = sizeof(u32), .align = sizeof(u32),
		.get = fpregs32_get, .set = fpregs32_set
	},
};

static const struct user_regset_view user_sparc32_view = {
	.name = "sparc", .e_machine = EM_SPARC,
	.regsets = sparc32_regsets, .n = ARRAY_SIZE(sparc32_regsets)
};
#endif /* CONFIG_COMPAT */

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	if (test_tsk_thread_flag(task, TIF_32BIT))
		return &user_sparc32_view;
#endif
	return &user_sparc64_view;
}

#ifdef CONFIG_COMPAT
struct compat_fps {
	unsigned int regs[32];
	unsigned int fsr;
	unsigned int flags;
	unsigned int extra;
	unsigned int fpqd;
	struct compat_fq {
		unsigned int insnaddr;
		unsigned int insn;
	} fpq[16];
};

long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	const struct user_regset_view *view = task_user_regset_view(child);
	compat_ulong_t caddr2 = task_pt_regs(current)->u_regs[UREG_I4];
	struct pt_regs32 __user *pregs;
	struct compat_fps __user *fps;
	unsigned long addr2 = caddr2;
	unsigned long addr = caddr;
	unsigned long data = cdata;
	int ret;

	pregs = (struct pt_regs32 __user *) addr;
	fps = (struct compat_fps __user *) addr;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = (addr != 0) ? -EIO : 0;
		break;

	case PTRACE_GETREGS:
		ret = copy_regset_to_user(child, view, REGSET_GENERAL,
					  32 * sizeof(u32),
					  4 * sizeof(u32),
					  &pregs->psr);
		if (!ret)
			ret = copy_regset_to_user(child, view, REGSET_GENERAL,
						  1 * sizeof(u32),
						  15 * sizeof(u32),
						  &pregs->u_regs[0]);
		break;

	case PTRACE_SETREGS:
		ret = copy_regset_from_user(child, view, REGSET_GENERAL,
					    32 * sizeof(u32),
					    4 * sizeof(u32),
					    &pregs->psr);
		if (!ret)
			ret = copy_regset_from_user(child, view, REGSET_GENERAL,
						    1 * sizeof(u32),
						    15 * sizeof(u32),
						    &pregs->u_regs[0]);
		break;

	case PTRACE_GETFPREGS:
		ret = copy_regset_to_user(child, view, REGSET_FP,
					  0 * sizeof(u32),
					  32 * sizeof(u32),
					  &fps->regs[0]);
		if (!ret)
			ret = copy_regset_to_user(child, view, REGSET_FP,
						  33 * sizeof(u32),
						  1 * sizeof(u32),
						  &fps->fsr);
		if (!ret) {
			if (__put_user(0, &fps->flags) ||
			    __put_user(0, &fps->extra) ||
			    __put_user(0, &fps->fpqd) ||
			    clear_user(&fps->fpq[0], 32 * sizeof(unsigned int)))
				ret = -EFAULT;
		}
		break;

	case PTRACE_SETFPREGS:
		ret = copy_regset_from_user(child, view, REGSET_FP,
					    0 * sizeof(u32),
					    32 * sizeof(u32),
					    &fps->regs[0]);
		if (!ret)
			ret = copy_regset_from_user(child, view, REGSET_FP,
						    33 * sizeof(u32),
						    1 * sizeof(u32),
						    &fps->fsr);
		break;

	case PTRACE_READTEXT:
	case PTRACE_READDATA:
		ret = ptrace_readdata(child, addr,
				      (char __user *)addr2, data);
		if (ret == data)
			ret = 0;
		else if (ret >= 0)
			ret = -EIO;
		break;

	case PTRACE_WRITETEXT:
	case PTRACE_WRITEDATA:
		ret = ptrace_writedata(child, (char __user *) addr2,
				       addr, data);
		if (ret == data)
			ret = 0;
		else if (ret >= 0)
			ret = -EIO;
		break;

	default:
		ret = compat_ptrace_request(child, request, addr, data);
		break;
	}
	return ret;
}
#endif /* CONFIG_COMPAT */

struct fps {
	unsigned int regs[64];
	unsigned long fsr;
};

long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
	const struct user_regset_view *view = task_user_regset_view(child);
	unsigned long addr2 = task_pt_regs(current)->u_regs[UREG_I4];
	struct pt_regs __user *pregs;
	struct fps __user *fps;
	int ret;

	pregs = (struct pt_regs __user *) (unsigned long) addr;
	fps = (struct fps __user *) (unsigned long) addr;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = (addr != 0) ? -EIO : 0;
		break;

	case PTRACE_GETREGS64:
		ret = copy_regset_to_user(child, view, REGSET_GENERAL,
					  1 * sizeof(u64),
					  15 * sizeof(u64),
					  &pregs->u_regs[0]);
		if (!ret) {
			/* XXX doesn't handle 'y' register correctly XXX */
			ret = copy_regset_to_user(child, view, REGSET_GENERAL,
						  32 * sizeof(u64),
						  4 * sizeof(u64),
						  &pregs->tstate);
		}
		break;

	case PTRACE_SETREGS64:
		ret = copy_regset_from_user(child, view, REGSET_GENERAL,
					    1 * sizeof(u64),
					    15 * sizeof(u64),
					    &pregs->u_regs[0]);
		if (!ret) {
			/* XXX doesn't handle 'y' register correctly XXX */
			ret = copy_regset_from_user(child, view, REGSET_GENERAL,
						    32 * sizeof(u64),
						    4 * sizeof(u64),
						    &pregs->tstate);
		}
		break;

	case PTRACE_GETFPREGS64:
		ret = copy_regset_to_user(child, view, REGSET_FP,
					  0 * sizeof(u64),
					  33 * sizeof(u64),
					  fps);
		break;

	case PTRACE_SETFPREGS64:
		ret = copy_regset_from_user(child, view, REGSET_FP,
					    0 * sizeof(u64),
					    33 * sizeof(u64),
					    fps);
		break;

	case PTRACE_READTEXT:
	case PTRACE_READDATA:
		ret = ptrace_readdata(child, addr,
				      (char __user *)addr2, data);
		if (ret == data)
			ret = 0;
		else if (ret >= 0)
			ret = -EIO;
		break;

	case PTRACE_WRITETEXT:
	case PTRACE_WRITEDATA:
		ret = ptrace_writedata(child, (char __user *) addr2,
				       addr, data);
		if (ret == data)
			ret = 0;
		else if (ret >= 0)
			ret = -EIO;
		break;

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}
	return ret;
}
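
/* Illustration (not part of the original file, guarded out of the build):
 * a hypothetical tracer-side call of the sparc64-specific PTRACE_GETREGS64
 * request handled above.  For this request addr carries the destination
 * buffer and data is unused; PTRACE_GETREGS64 and a 64-bit struct pt_regs
 * are assumed to be visible from the user-space <asm/ptrace.h>.  Builds as a
 * 64-bit user-space program, not in the kernel.
 */
#if 0
#include <sys/ptrace.h>
#include <sys/types.h>
#include <asm/ptrace.h>

static long fetch_regs64(pid_t pid, struct pt_regs *uregs)
{
	/* Copies %g1..%o7 plus tstate/tpc/tnpc; see the 'y' caveat above. */
	return ptrace(PTRACE_GETREGS64, pid, uregs, 0);
}
#endif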

asmlinkage void syscall_trace(struct pt_regs *regs, int syscall_exit_p)
{
	/* do the secure computing check first */
	secure_computing(regs->u_regs[UREG_G1]);

	if (unlikely(current->audit_context) && syscall_exit_p) {
		unsigned long tstate = regs->tstate;
		int result = AUDITSC_SUCCESS;

		if (unlikely(tstate & (TSTATE_XCARRY | TSTATE_ICARRY)))
			result = AUDITSC_FAILURE;

		audit_syscall_exit(result, regs->u_regs[UREG_I0]);
	}

	if (!(current->ptrace & PT_PTRACED))
		goto out;

	if (!test_thread_flag(TIF_SYSCALL_TRACE))
		goto out;

	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
				 ? 0x80 : 0));

	/*
	 * this isn't the same as continuing with a signal, but it will do
	 * for normal use.  strace only continues with a signal if the
	 * stopping signal is not SIGTRAP.  -brl
	 */
	if (current->exit_code) {
		send_sig(current->exit_code, current, 1);
		current->exit_code = 0;
	}

out:
	if (unlikely(current->audit_context) && !syscall_exit_p)
		audit_syscall_entry((test_thread_flag(TIF_32BIT) ?
				     AUDIT_ARCH_SPARC :
				     AUDIT_ARCH_SPARC64),
				    regs->u_regs[UREG_G1],
				    regs->u_regs[UREG_I0],
				    regs->u_regs[UREG_I1],
				    regs->u_regs[UREG_I2],
				    regs->u_regs[UREG_I3]);
}
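
/* Illustration (not part of the original file, guarded out of the build):
 * a hypothetical tracer loop driving the syscall_trace() hook above.  With
 * PTRACE_O_TRACESYSGOOD set, the tracee stops with SIGTRAP | 0x80 at every
 * syscall entry and exit, matching the ptrace_notify() call above.  Builds
 * as a user-space program, not in the kernel.
 */
#if 0
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <signal.h>

static void trace_syscalls(pid_t pid)
{
	int status;

	waitpid(pid, &status, 0);			/* initial stop */
	ptrace(PTRACE_SETOPTIONS, pid, 0, PTRACE_O_TRACESYSGOOD);

	for (;;) {
		/* Run until the next syscall entry or exit stop. */
		ptrace(PTRACE_SYSCALL, pid, 0, 0);
		if (waitpid(pid, &status, 0) < 0 || WIFEXITED(status))
			break;
		if (WIFSTOPPED(status) &&
		    WSTOPSIG(status) == (SIGTRAP | 0x80)) {
			/* Examine registers here, e.g. via PTRACE_GETREGSET. */
		}
	}
}
#endif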