/* ptrace.c: Sparc process tracing support.
 *
 * Copyright (C) 1996, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 *
 * Based upon code written by Ross Biro, Linus Torvalds, Bob Manson,
 * and David Mosberger.
 *
 * Added Linux support -miguel (weird, eh?, the original code was meant
 * to emulate SunOS).
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/security.h>
#include <linux/seccomp.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/regset.h>
#include <linux/compat.h>
#include <linux/elf.h>

#include <asm/asi.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/psrcompat.h>
#include <asm/visasm.h>
#include <asm/spitfire.h>
#include <asm/page.h>
#include <asm/cpudata.h>

/* #define ALLOW_INIT_TRACING */

/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
	/* nothing to do */
}

/* To get the necessary page struct, access_process_vm() first calls
 * get_user_pages().  This has done a flush_dcache_page() on the
 * accessed page.  Then our caller (copy_{to,from}_user_page()) did
 * a memcpy to read/write the data from that page.
 *
 * Now, the only thing we have to do is:
 * 1) flush the D-cache if it's possible that an illegal alias
 *    has been created
 * 2) flush the I-cache if this is pre-cheetah and we did a write
 */
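/* The D-cache on pre-hypervisor chips is virtually indexed while the base
 * page size is 8KB, so two mappings of the same physical page that differ
 * in address bit 13 can land in different cache lines (an "illegal alias").
 * The check below therefore flushes only when the kernel alias used for
 * the memcpy and the user's own mapping disagree in that bit.
 */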
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			 unsigned long uaddr, void *kaddr,
			 unsigned long len, int write)
{
	BUG_ON(len > PAGE_SIZE);

	if (tlb_type == hypervisor)
		return;

#ifdef DCACHE_ALIASING_POSSIBLE
	/* If bit 13 of the kernel address we used to access the
	 * user page is the same as the virtual address that page
	 * is mapped to in the user's address space, we can skip the
	 * D-cache flush.
	 */
	if ((uaddr ^ (unsigned long) kaddr) & (1UL << 13)) {
		unsigned long start = __pa(kaddr);
		unsigned long end = start + len;
		unsigned long dcache_line_size;

		dcache_line_size = local_cpu_data().dcache_line_size;

		if (tlb_type == spitfire) {
			for (; start < end; start += dcache_line_size)
				spitfire_put_dcache_tag(start & 0x3fe0, 0x0);
		} else {
			start &= ~(dcache_line_size - 1);
			for (; start < end; start += dcache_line_size)
				__asm__ __volatile__(
					"stxa %%g0, [%0] %1\n\t"
					"membar #Sync"
					: /* no outputs */
					: "r" (start),
					  "i" (ASI_DCACHE_INVALIDATE));
		}
	}
#endif
	if (write && tlb_type == spitfire) {
		unsigned long start = (unsigned long) kaddr;
		unsigned long end = start + len;
		unsigned long icache_line_size;

		icache_line_size = local_cpu_data().icache_line_size;

		for (; start < end; start += icache_line_size)
			flushi(start);
	}
}

enum sparc_regset {
	REGSET_GENERAL,
	REGSET_FP,
};

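/* The 64-bit GENERAL regset is laid out as 36 64-bit slots: %g0-%g7,
 * %o0-%o7, then the 16 window registers %l0-%l7/%i0-%i7, then TSTATE,
 * TPC, TNPC and Y.  The window registers are not held in pt_regs; they
 * live in the register window saved on the tracee's user stack, so the
 * get/set routines below transfer them with copy_{from,to}_user() at
 * %fp + STACK_BIAS (the 64-bit ABI biases %sp/%fp by 2047 bytes).
 */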
static int genregs64_get(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 void *kbuf, void __user *ubuf)
{
	const struct pt_regs *regs = task_pt_regs(target);
	int ret;

	if (target == current)
		flushw_user();

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  regs->u_regs,
				  0, 16 * sizeof(u64));
	if (!ret) {
		unsigned long __user *reg_window = (unsigned long __user *)
			(regs->u_regs[UREG_I6] + STACK_BIAS);
		unsigned long window[16];

		if (copy_from_user(window, reg_window, sizeof(window)))
			return -EFAULT;

		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  window,
					  16 * sizeof(u64),
					  32 * sizeof(u64));
	}

	if (!ret) {
		/* TSTATE, TPC, TNPC */
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &regs->tstate,
					  32 * sizeof(u64),
					  35 * sizeof(u64));
	}

	if (!ret) {
		unsigned long y = regs->y;

		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &y,
					  35 * sizeof(u64),
					  36 * sizeof(u64));
	}

	if (!ret)
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       36 * sizeof(u64), -1);

	return ret;
}

static int genregs64_set(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	int ret;

	if (target == current)
		flushw_user();

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 regs->u_regs,
				 0, 16 * sizeof(u64));
	if (!ret && count > 0) {
		unsigned long __user *reg_window = (unsigned long __user *)
			(regs->u_regs[UREG_I6] + STACK_BIAS);
		unsigned long window[16];

		if (copy_from_user(window, reg_window, sizeof(window)))
			return -EFAULT;

		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 window,
					 16 * sizeof(u64),
					 32 * sizeof(u64));
		if (!ret &&
		    copy_to_user(reg_window, window, sizeof(window)))
			return -EFAULT;
	}

	if (!ret && count > 0) {
		unsigned long tstate;

		/* TSTATE */
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &tstate,
					 32 * sizeof(u64),
					 33 * sizeof(u64));
		if (!ret) {
			/* Only the condition codes can be modified
			 * in the %tstate register.
			 */
			tstate &= (TSTATE_ICC | TSTATE_XCC);
			regs->tstate &= ~(TSTATE_ICC | TSTATE_XCC);
			regs->tstate |= tstate;
		}
	}

	if (!ret) {
		/* TPC, TNPC */
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &regs->tpc,
					 33 * sizeof(u64),
					 35 * sizeof(u64));
	}

	if (!ret) {
		unsigned long y;

		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &y,
					 35 * sizeof(u64),
					 36 * sizeof(u64));
		if (!ret)
			regs->y = y;
	}

	if (!ret)
		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						36 * sizeof(u64), -1);

	return ret;
}

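/* The 64-bit FP regset is 35 slots: the FP register file %f0-%f63 packed
 * into 32 64-bit words, then FSR, GSR and FPRS.  FPRS_DL and FPRS_DU say
 * whether the lower and upper halves of the register file hold live state
 * in the thread's save area; a half that is clean is reported as zero.
 * FPRS_FEF means the FPU is enabled, so FSR/GSR are only meaningful when
 * it is set.
 */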
static int fpregs64_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	const unsigned long *fpregs = task_thread_info(target)->fpregs;
	unsigned long fprs, fsr, gsr;
	int ret;

	if (target == current)
		save_and_clear_fpu();

	fprs = task_thread_info(target)->fpsaved[0];

	if (fprs & FPRS_DL)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  fpregs,
					  0, 16 * sizeof(u64));
	else
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       0,
					       16 * sizeof(u64));

	if (!ret) {
		if (fprs & FPRS_DU)
			ret = user_regset_copyout(&pos, &count,
						  &kbuf, &ubuf,
						  fpregs + 16,
						  16 * sizeof(u64),
						  32 * sizeof(u64));
		else
			ret = user_regset_copyout_zero(&pos, &count,
						       &kbuf, &ubuf,
						       16 * sizeof(u64),
						       32 * sizeof(u64));
	}

	if (fprs & FPRS_FEF) {
		fsr = task_thread_info(target)->xfsr[0];
		gsr = task_thread_info(target)->gsr[0];
	} else {
		fsr = gsr = 0;
	}

	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &fsr,
					  32 * sizeof(u64),
					  33 * sizeof(u64));
	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &gsr,
					  33 * sizeof(u64),
					  34 * sizeof(u64));
	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &fprs,
					  34 * sizeof(u64),
					  35 * sizeof(u64));

	if (!ret)
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       35 * sizeof(u64), -1);

	return ret;
}

static int fpregs64_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	unsigned long *fpregs = task_thread_info(target)->fpregs;
	unsigned long fprs;
	int ret;

	if (target == current)
		save_and_clear_fpu();

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 fpregs,
				 0, 32 * sizeof(u64));
	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 task_thread_info(target)->xfsr,
					 32 * sizeof(u64),
					 33 * sizeof(u64));
	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 task_thread_info(target)->gsr,
					 33 * sizeof(u64),
					 34 * sizeof(u64));

	fprs = task_thread_info(target)->fpsaved[0];
	if (!ret && count > 0) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &fprs,
					 34 * sizeof(u64),
					 35 * sizeof(u64));
	}

	fprs |= (FPRS_FEF | FPRS_DL | FPRS_DU);
	task_thread_info(target)->fpsaved[0] = fprs;

	if (!ret)
		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						35 * sizeof(u64), -1);
	return ret;
}

static const struct user_regset sparc64_regsets[] = {
	/* Format is:
	 *	G0 --> G7
	 *	O0 --> O7
	 *	L0 --> L7
	 *	I0 --> I7
	 *	TSTATE, TPC, TNPC, Y
	 */
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = 36,
		.size = sizeof(u64), .align = sizeof(u64),
		.get = genregs64_get, .set = genregs64_set
	},
	/* Format is:
	 *	F0 --> F63
	 *	FSR
	 *	GSR
	 *	FPRS
	 */
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = 35,
		.size = sizeof(u64), .align = sizeof(u64),
		.get = fpregs64_get, .set = fpregs64_set
	},
};

static const struct user_regset_view user_sparc64_view = {
	.name = "sparc64", .e_machine = EM_SPARCV9,
	.regsets = sparc64_regsets, .n = ARRAY_SIZE(sparc64_regsets)
};

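/* The 32-bit (compat) GENERAL regset mirrors the sparc32 layout: 38
 * 32-bit slots holding %g0-%g7, %o0-%o7, the window registers, then PSR,
 * PC, nPC, Y, WIM and TBR.  The window registers are again read and
 * written through the register window on the tracee's stack, but a
 * 32-bit task's %sp is not biased, so no STACK_BIAS adjustment is
 * applied here.  WIM and TBR do not exist on sparc64 and read as zero.
 */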
static int genregs32_get(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 void *kbuf, void __user *ubuf)
{
	const struct pt_regs *regs = task_pt_regs(target);
	compat_ulong_t __user *reg_window;
	compat_ulong_t *k = kbuf;
	compat_ulong_t __user *u = ubuf;
	compat_ulong_t reg;

	if (target == current)
		flushw_user();

	pos /= sizeof(reg);
	count /= sizeof(reg);

	if (kbuf) {
		for (; count > 0 && pos < 16; count--)
			*k++ = regs->u_regs[pos++];

		reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
		for (; count > 0 && pos < 32; count--) {
			if (get_user(*k++, &reg_window[pos++]))
				return -EFAULT;
		}
	} else {
		for (; count > 0 && pos < 16; count--) {
			if (put_user((compat_ulong_t) regs->u_regs[pos++], u++))
				return -EFAULT;
		}

		reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
		for (; count > 0 && pos < 32; count--) {
			if (get_user(reg, &reg_window[pos++]) ||
			    put_user(reg, u++))
				return -EFAULT;
		}
	}

	while (count > 0) {
		switch (pos) {
		case 32: /* PSR */
			reg = tstate_to_psr(regs->tstate);
			break;
		case 33: /* PC */
			reg = regs->tpc;
			break;
		case 34: /* NPC */
			reg = regs->tnpc;
			break;
		case 35: /* Y */
			reg = regs->y;
			break;
		case 36: /* WIM */
		case 37: /* TBR */
			reg = 0;
			break;
		default:
			goto finish;
		}

		if (kbuf)
			*k++ = reg;
		else if (put_user(reg, u++))
			return -EFAULT;
		pos++;
		count--;
	}
finish:
	pos *= sizeof(reg);
	count *= sizeof(reg);

	return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					38 * sizeof(reg), -1);
}

static int genregs32_set(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	compat_ulong_t __user *reg_window;
	const compat_ulong_t *k = kbuf;
	const compat_ulong_t __user *u = ubuf;
	compat_ulong_t reg;

	if (target == current)
		flushw_user();

	pos /= sizeof(reg);
	count /= sizeof(reg);

	if (kbuf) {
		for (; count > 0 && pos < 16; count--)
			regs->u_regs[pos++] = *k++;

		reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
		for (; count > 0 && pos < 32; count--) {
			if (put_user(*k++, &reg_window[pos++]))
				return -EFAULT;
		}
	} else {
		for (; count > 0 && pos < 16; count--) {
			if (get_user(reg, u++))
				return -EFAULT;
			regs->u_regs[pos++] = reg;
		}

		reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
		for (; count > 0 && pos < 32; count--) {
			if (get_user(reg, u++) ||
			    put_user(reg, &reg_window[pos++]))
				return -EFAULT;
		}
	}

	while (count > 0) {
		unsigned long tstate;

		if (kbuf)
			reg = *k++;
		else if (get_user(reg, u++))
			return -EFAULT;

		switch (pos) {
		case 32: /* PSR */
			tstate = regs->tstate;
			tstate &= ~(TSTATE_ICC | TSTATE_XCC);
			tstate |= psr_to_tstate_icc(reg);
			regs->tstate = tstate;
			break;
		case 33: /* PC */
			regs->tpc = reg;
			break;
		case 34: /* NPC */
			regs->tnpc = reg;
			break;
		case 35: /* Y */
			regs->y = reg;
			break;
		case 36: /* WIM */
		case 37: /* TBR */
			break;
		default:
			goto finish;
		}

		pos++;
		count--;
	}
finish:
	pos *= sizeof(reg);
	count *= sizeof(reg);

	return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					 38 * sizeof(reg), -1);
}

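/* In the 32-bit FP regset, slot 34 is a packed status word: per the
 * sparc32 format (see sparc32_regsets below) its bytes hold the FPU
 * queue count, queue entry size and "FPU enabled" flag.  fpregs32_get()
 * reports an empty queue with an entry size of 8 and derives the enabled
 * byte from FPRS_FEF; fpregs32_set() ignores the status word and the
 * queue entirely on write.
 */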
static int fpregs32_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	const unsigned long *fpregs = task_thread_info(target)->fpregs;
	compat_ulong_t enabled;
	unsigned long fprs;
	compat_ulong_t fsr;
	int ret = 0;

	if (target == current)
		save_and_clear_fpu();

	fprs = task_thread_info(target)->fpsaved[0];
	if (fprs & FPRS_FEF) {
		fsr = task_thread_info(target)->xfsr[0];
		enabled = 1;
	} else {
		fsr = 0;
		enabled = 0;
	}

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  fpregs,
				  0, 32 * sizeof(u32));

	if (!ret)
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       32 * sizeof(u32),
					       33 * sizeof(u32));
	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &fsr,
					  33 * sizeof(u32),
					  34 * sizeof(u32));

	if (!ret) {
		compat_ulong_t val;

		val = (enabled << 8) | (8 << 16);
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &val,
					  34 * sizeof(u32),
					  35 * sizeof(u32));
	}

	if (!ret)
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       35 * sizeof(u32), -1);

	return ret;
}

static int fpregs32_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	unsigned long *fpregs = task_thread_info(target)->fpregs;
	unsigned long fprs;
	int ret;

	if (target == current)
		save_and_clear_fpu();

	fprs = task_thread_info(target)->fpsaved[0];

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 fpregs,
				 0, 32 * sizeof(u32));
	if (!ret)
		user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					  32 * sizeof(u32),
					  33 * sizeof(u32));
	if (!ret && count > 0) {
		compat_ulong_t fsr;
		unsigned long val;

		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &fsr,
					 33 * sizeof(u32),
					 34 * sizeof(u32));
		if (!ret) {
			val = task_thread_info(target)->xfsr[0];
			val &= 0xffffffff00000000UL;
			val |= fsr;
			task_thread_info(target)->xfsr[0] = val;
		}
	}

	fprs |= (FPRS_FEF | FPRS_DL);
	task_thread_info(target)->fpsaved[0] = fprs;

	if (!ret)
		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						34 * sizeof(u32), -1);
	return ret;
}

static const struct user_regset sparc32_regsets[] = {
	/* Format is:
	 *	G0 --> G7
	 *	O0 --> O7
	 *	L0 --> L7
	 *	I0 --> I7
	 *	PSR, PC, nPC, Y, WIM, TBR
	 */
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = 38,
		.size = sizeof(u32), .align = sizeof(u32),
		.get = genregs32_get, .set = genregs32_set
	},
	/* Format is:
	 *	F0 --> F31
	 *	empty 32-bit word
	 *	FSR (32-bit word)
	 *	FPU QUEUE COUNT (8-bit char)
	 *	FPU QUEUE ENTRYSIZE (8-bit char)
	 *	FPU ENABLED (8-bit char)
	 *	empty 8-bit char
	 *	FPU QUEUE (64 32-bit ints)
	 */
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = 99,
		.size = sizeof(u32), .align = sizeof(u32),
		.get = fpregs32_get, .set = fpregs32_set
	},
};

static const struct user_regset_view user_sparc32_view = {
	.name = "sparc", .e_machine = EM_SPARC,
	.regsets = sparc32_regsets, .n = ARRAY_SIZE(sparc32_regsets)
};

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	if (test_tsk_thread_flag(task, TIF_32BIT))
		return &user_sparc32_view;
	return &user_sparc64_view;
}

struct compat_fps {
	unsigned int regs[32];
	unsigned int fsr;
	unsigned int flags;
	unsigned int extra;
	unsigned int fpqd;
	struct compat_fq {
		unsigned int insnaddr;
		unsigned int insn;
	} fpq[16];
};

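/* Some sparc ptrace requests (PTRACE_READTEXT/READDATA and the WRITE
 * variants) take two addresses.  The second one does not fit the normal
 * four-argument ptrace() convention, so it is recovered from the tracer's
 * own saved trap frame below: u_regs[UREG_I4] of task_pt_regs(current),
 * i.e. the value the tracer passed in its fifth system-call argument
 * register.
 */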
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	const struct user_regset_view *view = task_user_regset_view(child);
	compat_ulong_t caddr2 = task_pt_regs(current)->u_regs[UREG_I4];
	struct pt_regs32 __user *pregs;
	struct compat_fps __user *fps;
	unsigned long addr2 = caddr2;
	unsigned long addr = caddr;
	unsigned long data = cdata;
	int ret;

	pregs = (struct pt_regs32 __user *) addr;
	fps = (struct compat_fps __user *) addr;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = (addr != 0) ? -EIO : 0;
		break;

	case PTRACE_GETREGS:
		ret = copy_regset_to_user(child, view, REGSET_GENERAL,
					  32 * sizeof(u32),
					  4 * sizeof(u32),
					  &pregs->psr);
		if (!ret)
			ret = copy_regset_to_user(child, view, REGSET_GENERAL,
						  1 * sizeof(u32),
						  15 * sizeof(u32),
						  &pregs->u_regs[0]);
		break;

	case PTRACE_SETREGS:
		ret = copy_regset_from_user(child, view, REGSET_GENERAL,
					    32 * sizeof(u32),
					    4 * sizeof(u32),
					    &pregs->psr);
		if (!ret)
			ret = copy_regset_from_user(child, view, REGSET_GENERAL,
						    1 * sizeof(u32),
						    15 * sizeof(u32),
						    &pregs->u_regs[0]);
		break;

	case PTRACE_GETFPREGS:
		ret = copy_regset_to_user(child, view, REGSET_FP,
					  0 * sizeof(u32),
					  32 * sizeof(u32),
					  &fps->regs[0]);
		if (!ret)
			ret = copy_regset_to_user(child, view, REGSET_FP,
						  33 * sizeof(u32),
						  1 * sizeof(u32),
						  &fps->fsr);
		if (!ret) {
			if (__put_user(0, &fps->flags) ||
			    __put_user(0, &fps->extra) ||
			    __put_user(0, &fps->fpqd) ||
			    clear_user(&fps->fpq[0], 32 * sizeof(unsigned int)))
				ret = -EFAULT;
		}
		break;

	case PTRACE_SETFPREGS:
		ret = copy_regset_from_user(child, view, REGSET_FP,
					    0 * sizeof(u32),
					    32 * sizeof(u32),
					    &fps->regs[0]);
		if (!ret)
			ret = copy_regset_from_user(child, view, REGSET_FP,
						    33 * sizeof(u32),
						    1 * sizeof(u32),
						    &fps->fsr);
		break;

	case PTRACE_READTEXT:
	case PTRACE_READDATA:
		ret = ptrace_readdata(child, addr,
				      (char __user *)addr2, data);
		if (ret == data)
			ret = 0;
		else if (ret >= 0)
			ret = -EIO;
		break;

	case PTRACE_WRITETEXT:
	case PTRACE_WRITEDATA:
		ret = ptrace_writedata(child, (char __user *) addr2,
				       addr, data);
		if (ret == data)
			ret = 0;
		else if (ret >= 0)
			ret = -EIO;
		break;

	default:
		ret = compat_ptrace_request(child, request, addr, data);
		break;
	}
	return ret;
}

struct fps {
	unsigned int regs[64];
	unsigned long fsr;
};

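/* struct fps above is the user-visible buffer for PTRACE_{GET,SET}FPREGS64:
 * 64 32-bit words of FP register data followed by the 64-bit FSR, which is
 * exactly the first 33 64-bit slots of the REGSET_FP layout (GSR and FPRS
 * are not transferred by these requests).
 */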
long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
	const struct user_regset_view *view = task_user_regset_view(child);
	struct pt_regs __user *pregs = (struct pt_regs __user *) addr;
	unsigned long addr2 = task_pt_regs(current)->u_regs[UREG_I4];
	struct fps __user *fps = (struct fps __user *) addr;
	int ret;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = (addr != 0) ? -EIO : 0;
		break;

	case PTRACE_GETREGS64:
		ret = copy_regset_to_user(child, view, REGSET_GENERAL,
					  1 * sizeof(u64),
					  15 * sizeof(u64),
					  &pregs->u_regs[0]);
		if (!ret) {
			/* XXX doesn't handle 'y' register correctly XXX */
			ret = copy_regset_to_user(child, view, REGSET_GENERAL,
						  32 * sizeof(u64),
						  4 * sizeof(u64),
						  &pregs->tstate);
		}
		break;

	case PTRACE_SETREGS64:
		ret = copy_regset_from_user(child, view, REGSET_GENERAL,
					    1 * sizeof(u64),
					    15 * sizeof(u64),
					    &pregs->u_regs[0]);
		if (!ret) {
			/* XXX doesn't handle 'y' register correctly XXX */
			ret = copy_regset_from_user(child, view, REGSET_GENERAL,
						    32 * sizeof(u64),
						    4 * sizeof(u64),
						    &pregs->tstate);
		}
		break;

	case PTRACE_GETFPREGS64:
		ret = copy_regset_to_user(child, view, REGSET_FP,
					  0 * sizeof(u64),
					  33 * sizeof(u64),
					  fps);
		break;

	case PTRACE_SETFPREGS64:
		ret = copy_regset_from_user(child, view, REGSET_FP,
					    0 * sizeof(u64),
					    33 * sizeof(u64),
					    fps);
		break;

	case PTRACE_READTEXT:
	case PTRACE_READDATA:
		ret = ptrace_readdata(child, addr,
				      (char __user *)addr2, data);
		if (ret == data)
			ret = 0;
		else if (ret >= 0)
			ret = -EIO;
		break;

	case PTRACE_WRITETEXT:
	case PTRACE_WRITEDATA:
		ret = ptrace_writedata(child, (char __user *) addr2,
				       addr, data);
		if (ret == data)
			ret = 0;
		else if (ret >= 0)
			ret = -EIO;
		break;

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}
	return ret;
}

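/* Example: a tracer that has attached and waited for the child to stop
 * can fetch the full 64-bit register state with the PTRACE_GETREGS64
 * request handled above.  A minimal sketch, assuming a 64-bit tracer
 * that includes <asm/ptrace.h> for struct pt_regs and the request
 * constant:
 *
 *	struct pt_regs regs;
 *
 *	if (ptrace(PTRACE_GETREGS64, pid, &regs, NULL) == 0)
 *		printf("tpc=%016lx tnpc=%016lx\n", regs.tpc, regs.tnpc);
 *
 * Note that %y is not transferred by this request (see the XXX comments
 * above), and the window registers must be read from the tracee's stack
 * separately.
 */
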
asmlinkage void syscall_trace(struct pt_regs *regs, int syscall_exit_p)
{
	/* do the secure computing check first */
	secure_computing(regs->u_regs[UREG_G1]);

	if (unlikely(current->audit_context) && syscall_exit_p) {
		unsigned long tstate = regs->tstate;
		int result = AUDITSC_SUCCESS;

		if (unlikely(tstate & (TSTATE_XCARRY | TSTATE_ICARRY)))
			result = AUDITSC_FAILURE;

		audit_syscall_exit(result, regs->u_regs[UREG_I0]);
	}

	if (!(current->ptrace & PT_PTRACED))
		goto out;

	if (!test_thread_flag(TIF_SYSCALL_TRACE))
		goto out;

	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
				 ? 0x80 : 0));

	/*
	 * this isn't the same as continuing with a signal, but it will do
	 * for normal use.  strace only continues with a signal if the
	 * stopping signal is not SIGTRAP.  -brl
	 */
	if (current->exit_code) {
		send_sig(current->exit_code, current, 1);
		current->exit_code = 0;
	}

out:
	if (unlikely(current->audit_context) && !syscall_exit_p)
		audit_syscall_entry((test_thread_flag(TIF_32BIT) ?
				     AUDIT_ARCH_SPARC :
				     AUDIT_ARCH_SPARC64),
				    regs->u_regs[UREG_G1],
				    regs->u_regs[UREG_I0],
				    regs->u_regs[UREG_I1],
				    regs->u_regs[UREG_I2],
				    regs->u_regs[UREG_I3]);
}