ptrace_64.c

/* ptrace.c: Sparc process tracing support.
 *
 * Copyright (C) 1996, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 *
 * Based upon code written by Ross Biro, Linus Torvalds, Bob Manson,
 * and David Mosberger.
 *
 * Added Linux support -miguel (weird, eh?, the original code was meant
 * to emulate SunOS).
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/smp.h>
#include <linux/security.h>
#include <linux/seccomp.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <trace/syscall.h>
#include <linux/compat.h>
#include <linux/elf.h>

#include <asm/asi.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/psrcompat.h>
#include <asm/visasm.h>
#include <asm/spitfire.h>
#include <asm/page.h>
#include <asm/cpudata.h>
#include <asm/cacheflush.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

#include "entry.h"

/* #define ALLOW_INIT_TRACING */

/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
	/* nothing to do */
}

/* To get the necessary page struct, access_process_vm() first calls
 * get_user_pages().  This has done a flush_dcache_page() on the
 * accessed page.  Then our caller (copy_{to,from}_user_page()) did
 * a memcpy to read/write the data from that page.
 *
 * Now, the only thing we have to do is:
 * 1) flush the D-cache if it's possible that an illegal alias
 *    has been created
 * 2) flush the I-cache if this is pre-cheetah and we did a write
 */
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			 unsigned long uaddr, void *kaddr,
			 unsigned long len, int write)
{
	BUG_ON(len > PAGE_SIZE);

	if (tlb_type == hypervisor)
		return;

	preempt_disable();

#ifdef DCACHE_ALIASING_POSSIBLE
	/* If bit 13 of the kernel address we used to access the
	 * user page is the same as the virtual address that page
	 * is mapped to in the user's address space, we can skip the
	 * D-cache flush.
	 */
	if ((uaddr ^ (unsigned long) kaddr) & (1UL << 13)) {
		unsigned long start = __pa(kaddr);
		unsigned long end = start + len;
		unsigned long dcache_line_size;

		dcache_line_size = local_cpu_data().dcache_line_size;

		if (tlb_type == spitfire) {
			for (; start < end; start += dcache_line_size)
				spitfire_put_dcache_tag(start & 0x3fe0, 0x0);
		} else {
			start &= ~(dcache_line_size - 1);
			for (; start < end; start += dcache_line_size)
				__asm__ __volatile__(
					"stxa %%g0, [%0] %1\n\t"
					"membar #Sync"
					: /* no outputs */
					: "r" (start),
					  "i" (ASI_DCACHE_INVALIDATE));
		}
	}
#endif
	if (write && tlb_type == spitfire) {
		unsigned long start = (unsigned long) kaddr;
		unsigned long end = start + len;
		unsigned long icache_line_size;

		icache_line_size = local_cpu_data().icache_line_size;

		for (; start < end; start += icache_line_size)
			flushi(start);
	}

	preempt_enable();
}
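
/* Helpers for reading from / writing to the tracee's address space:
 * plain copy_{from,to}_user() when the target is the current task,
 * access_process_vm() otherwise.  Both return 0 on success and
 * -EFAULT if the full length could not be transferred.
 */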
static int get_from_target(struct task_struct *target, unsigned long uaddr,
			   void *kbuf, int len)
{
	if (target == current) {
		if (copy_from_user(kbuf, (void __user *) uaddr, len))
			return -EFAULT;
	} else {
		int len2 = access_process_vm(target, uaddr, kbuf, len, 0);
		if (len2 != len)
			return -EFAULT;
	}
	return 0;
}

static int set_to_target(struct task_struct *target, unsigned long uaddr,
			 void *kbuf, int len)
{
	if (target == current) {
		if (copy_to_user((void __user *) uaddr, kbuf, len))
			return -EFAULT;
	} else {
		int len2 = access_process_vm(target, uaddr, kbuf, len, 1);
		if (len2 != len)
			return -EFAULT;
	}
	return 0;
}
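
/* Fetch or store the register window that the target's saved stack
 * pointer (%i6) points at.  For a 32-bit task the window on the stack
 * is a struct reg_window32 and is widened/narrowed here; for a 64-bit
 * task the saved %sp must first be adjusted by STACK_BIAS.
 */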
static int regwindow64_get(struct task_struct *target,
			   const struct pt_regs *regs,
			   struct reg_window *wbuf)
{
	unsigned long rw_addr = regs->u_regs[UREG_I6];

	if (test_tsk_thread_flag(current, TIF_32BIT)) {
		struct reg_window32 win32;
		int i;

		if (get_from_target(target, rw_addr, &win32, sizeof(win32)))
			return -EFAULT;
		for (i = 0; i < 8; i++)
			wbuf->locals[i] = win32.locals[i];
		for (i = 0; i < 8; i++)
			wbuf->ins[i] = win32.ins[i];
	} else {
		rw_addr += STACK_BIAS;
		if (get_from_target(target, rw_addr, wbuf, sizeof(*wbuf)))
			return -EFAULT;
	}

	return 0;
}

static int regwindow64_set(struct task_struct *target,
			   const struct pt_regs *regs,
			   struct reg_window *wbuf)
{
	unsigned long rw_addr = regs->u_regs[UREG_I6];

	if (test_tsk_thread_flag(current, TIF_32BIT)) {
		struct reg_window32 win32;
		int i;

		for (i = 0; i < 8; i++)
			win32.locals[i] = wbuf->locals[i];
		for (i = 0; i < 8; i++)
			win32.ins[i] = wbuf->ins[i];
		if (set_to_target(target, rw_addr, &win32, sizeof(win32)))
			return -EFAULT;
	} else {
		rw_addr += STACK_BIAS;
		if (set_to_target(target, rw_addr, wbuf, sizeof(*wbuf)))
			return -EFAULT;
	}

	return 0;
}

enum sparc_regset {
	REGSET_GENERAL,
	REGSET_FP,
};
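
/* Regset get/set for the 64-bit general registers.  Slots 0-15 come
 * from pt_regs (%g0-%g7, %o0-%o7), slots 16-31 are the current register
 * window read from or written back to the user stack, and slots 32-35
 * hold TSTATE, TPC, TNPC and Y.  On set, only the condition codes and
 * the "in syscall" bit of %tstate may be changed.
 */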
static int genregs64_get(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 void *kbuf, void __user *ubuf)
{
	const struct pt_regs *regs = task_pt_regs(target);
	int ret;

	if (target == current)
		flushw_user();

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  regs->u_regs,
				  0, 16 * sizeof(u64));
	if (!ret && count && pos < (32 * sizeof(u64))) {
		struct reg_window window;

		if (regwindow64_get(target, regs, &window))
			return -EFAULT;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &window,
					  16 * sizeof(u64),
					  32 * sizeof(u64));
	}

	if (!ret) {
		/* TSTATE, TPC, TNPC */
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &regs->tstate,
					  32 * sizeof(u64),
					  35 * sizeof(u64));
	}

	if (!ret) {
		unsigned long y = regs->y;

		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &y,
					  35 * sizeof(u64),
					  36 * sizeof(u64));
	}

	if (!ret) {
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       36 * sizeof(u64), -1);
	}

	return ret;
}

static int genregs64_set(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	int ret;

	if (target == current)
		flushw_user();

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 regs->u_regs,
				 0, 16 * sizeof(u64));
	if (!ret && count && pos < (32 * sizeof(u64))) {
		struct reg_window window;

		if (regwindow64_get(target, regs, &window))
			return -EFAULT;

		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &window,
					 16 * sizeof(u64),
					 32 * sizeof(u64));

		if (!ret &&
		    regwindow64_set(target, regs, &window))
			return -EFAULT;
	}

	if (!ret && count > 0) {
		unsigned long tstate;

		/* TSTATE */
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &tstate,
					 32 * sizeof(u64),
					 33 * sizeof(u64));
		if (!ret) {
			/* Only the condition codes and the "in syscall"
			 * state can be modified in the %tstate register.
			 */
			tstate &= (TSTATE_ICC | TSTATE_XCC | TSTATE_SYSCALL);
			regs->tstate &= ~(TSTATE_ICC | TSTATE_XCC | TSTATE_SYSCALL);
			regs->tstate |= tstate;
		}
	}

	if (!ret) {
		/* TPC, TNPC */
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &regs->tpc,
					 33 * sizeof(u64),
					 35 * sizeof(u64));
	}

	if (!ret) {
		unsigned long y;

		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &y,
					 35 * sizeof(u64),
					 36 * sizeof(u64));
		if (!ret)
			regs->y = y;
	}

	if (!ret)
		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						36 * sizeof(u64), -1);

	return ret;
}
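
/* Regset get/set for the 64-bit floating point state: 32 double-word
 * registers followed by FSR, GSR and FPRS.  Halves of the register file
 * that were never saved (per the FPRS_DL/FPRS_DU bits) read back as
 * zero; on set, the FPRS enable and dirty bits are forced on so the new
 * contents are treated as valid.
 */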
static int fpregs64_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	const unsigned long *fpregs = task_thread_info(target)->fpregs;
	unsigned long fprs, fsr, gsr;
	int ret;

	if (target == current)
		save_and_clear_fpu();

	fprs = task_thread_info(target)->fpsaved[0];

	if (fprs & FPRS_DL)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  fpregs,
					  0, 16 * sizeof(u64));
	else
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       0,
					       16 * sizeof(u64));

	if (!ret) {
		if (fprs & FPRS_DU)
			ret = user_regset_copyout(&pos, &count,
						  &kbuf, &ubuf,
						  fpregs + 16,
						  16 * sizeof(u64),
						  32 * sizeof(u64));
		else
			ret = user_regset_copyout_zero(&pos, &count,
						       &kbuf, &ubuf,
						       16 * sizeof(u64),
						       32 * sizeof(u64));
	}

	if (fprs & FPRS_FEF) {
		fsr = task_thread_info(target)->xfsr[0];
		gsr = task_thread_info(target)->gsr[0];
	} else {
		fsr = gsr = 0;
	}

	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &fsr,
					  32 * sizeof(u64),
					  33 * sizeof(u64));
	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &gsr,
					  33 * sizeof(u64),
					  34 * sizeof(u64));
	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &fprs,
					  34 * sizeof(u64),
					  35 * sizeof(u64));

	if (!ret)
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       35 * sizeof(u64), -1);

	return ret;
}

static int fpregs64_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	unsigned long *fpregs = task_thread_info(target)->fpregs;
	unsigned long fprs;
	int ret;

	if (target == current)
		save_and_clear_fpu();

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 fpregs,
				 0, 32 * sizeof(u64));
	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 task_thread_info(target)->xfsr,
					 32 * sizeof(u64),
					 33 * sizeof(u64));
	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 task_thread_info(target)->gsr,
					 33 * sizeof(u64),
					 34 * sizeof(u64));

	fprs = task_thread_info(target)->fpsaved[0];
	if (!ret && count > 0) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &fprs,
					 34 * sizeof(u64),
					 35 * sizeof(u64));
	}

	fprs |= (FPRS_FEF | FPRS_DL | FPRS_DU);
	task_thread_info(target)->fpsaved[0] = fprs;

	if (!ret)
		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						35 * sizeof(u64), -1);
	return ret;
}

static const struct user_regset sparc64_regsets[] = {
	/* Format is:
	 *	G0 --> G7
	 *	O0 --> O7
	 *	L0 --> L7
	 *	I0 --> I7
	 *	TSTATE, TPC, TNPC, Y
	 */
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = 36,
		.size = sizeof(u64), .align = sizeof(u64),
		.get = genregs64_get, .set = genregs64_set
	},
	/* Format is:
	 *	F0 --> F63
	 *	FSR
	 *	GSR
	 *	FPRS
	 */
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = 35,
		.size = sizeof(u64), .align = sizeof(u64),
		.get = fpregs64_get, .set = fpregs64_set
	},
};

static const struct user_regset_view user_sparc64_view = {
	.name = "sparc64", .e_machine = EM_SPARCV9,
	.regsets = sparc64_regsets, .n = ARRAY_SIZE(sparc64_regsets)
};

#ifdef CONFIG_COMPAT
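/* Compat regset get/set for 32-bit tasks, laid out like the 32-bit
 * sparc general register set: 16 u_regs from pt_regs, 16 window
 * registers fetched from (or stored to) the user stack, then PSR, PC,
 * NPC, Y and dummy WIM/TBR slots.  The register window is accessed with
 * get_user()/put_user() when the target is the current task and with
 * access_process_vm() otherwise.
 */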
static int genregs32_get(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 void *kbuf, void __user *ubuf)
{
	const struct pt_regs *regs = task_pt_regs(target);
	compat_ulong_t __user *reg_window;
	compat_ulong_t *k = kbuf;
	compat_ulong_t __user *u = ubuf;
	compat_ulong_t reg;

	if (target == current)
		flushw_user();

	pos /= sizeof(reg);
	count /= sizeof(reg);

	if (kbuf) {
		for (; count > 0 && pos < 16; count--)
			*k++ = regs->u_regs[pos++];

		reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
		if (target == current) {
			for (; count > 0 && pos < 32; count--) {
				if (get_user(*k++, &reg_window[pos++]))
					return -EFAULT;
			}
		} else {
			for (; count > 0 && pos < 32; count--) {
				if (access_process_vm(target,
						      (unsigned long)
						      &reg_window[pos],
						      k, sizeof(*k), 0)
				    != sizeof(*k))
					return -EFAULT;
				k++;
				pos++;
			}
		}
	} else {
		for (; count > 0 && pos < 16; count--) {
			if (put_user((compat_ulong_t) regs->u_regs[pos++], u++))
				return -EFAULT;
		}

		reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
		if (target == current) {
			for (; count > 0 && pos < 32; count--) {
				if (get_user(reg, &reg_window[pos++]) ||
				    put_user(reg, u++))
					return -EFAULT;
			}
		} else {
			for (; count > 0 && pos < 32; count--) {
				if (access_process_vm(target,
						      (unsigned long)
						      &reg_window[pos],
						      &reg, sizeof(reg), 0)
				    != sizeof(reg))
					return -EFAULT;
				if (access_process_vm(target,
						      (unsigned long) u,
						      &reg, sizeof(reg), 1)
				    != sizeof(reg))
					return -EFAULT;
				pos++;
				u++;
			}
		}
	}
	while (count > 0) {
		switch (pos) {
		case 32: /* PSR */
			reg = tstate_to_psr(regs->tstate);
			break;
		case 33: /* PC */
			reg = regs->tpc;
			break;
		case 34: /* NPC */
			reg = regs->tnpc;
			break;
		case 35: /* Y */
			reg = regs->y;
			break;
		case 36: /* WIM */
		case 37: /* TBR */
			reg = 0;
			break;
		default:
			goto finish;
		}

		if (kbuf)
			*k++ = reg;
		else if (put_user(reg, u++))
			return -EFAULT;
		pos++;
		count--;
	}
finish:
	pos *= sizeof(reg);
	count *= sizeof(reg);

	return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					38 * sizeof(reg), -1);
}

static int genregs32_set(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	compat_ulong_t __user *reg_window;
	const compat_ulong_t *k = kbuf;
	const compat_ulong_t __user *u = ubuf;
	compat_ulong_t reg;

	if (target == current)
		flushw_user();

	pos /= sizeof(reg);
	count /= sizeof(reg);

	if (kbuf) {
		for (; count > 0 && pos < 16; count--)
			regs->u_regs[pos++] = *k++;

		reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
		if (target == current) {
			for (; count > 0 && pos < 32; count--) {
				if (put_user(*k++, &reg_window[pos++]))
					return -EFAULT;
			}
		} else {
			for (; count > 0 && pos < 32; count--) {
				if (access_process_vm(target,
						      (unsigned long)
						      &reg_window[pos],
						      (void *) k,
						      sizeof(*k), 1)
				    != sizeof(*k))
					return -EFAULT;
				k++;
				pos++;
			}
		}
	} else {
		for (; count > 0 && pos < 16; count--) {
			if (get_user(reg, u++))
				return -EFAULT;
			regs->u_regs[pos++] = reg;
		}

		reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
		if (target == current) {
			for (; count > 0 && pos < 32; count--) {
				if (get_user(reg, u++) ||
				    put_user(reg, &reg_window[pos++]))
					return -EFAULT;
			}
		} else {
			for (; count > 0 && pos < 32; count--) {
				if (access_process_vm(target,
						      (unsigned long) u,
						      &reg, sizeof(reg), 0)
				    != sizeof(reg))
					return -EFAULT;
				if (access_process_vm(target,
						      (unsigned long)
						      &reg_window[pos],
						      &reg, sizeof(reg), 1)
				    != sizeof(reg))
					return -EFAULT;
				pos++;
				u++;
			}
		}
	}
	while (count > 0) {
		unsigned long tstate;

		if (kbuf)
			reg = *k++;
		else if (get_user(reg, u++))
			return -EFAULT;

		switch (pos) {
		case 32: /* PSR */
			tstate = regs->tstate;
			tstate &= ~(TSTATE_ICC | TSTATE_XCC | TSTATE_SYSCALL);
			tstate |= psr_to_tstate_icc(reg);
			if (reg & PSR_SYSCALL)
				tstate |= TSTATE_SYSCALL;
			regs->tstate = tstate;
			break;
		case 33: /* PC */
			regs->tpc = reg;
			break;
		case 34: /* NPC */
			regs->tnpc = reg;
			break;
		case 35: /* Y */
			regs->y = reg;
			break;
		case 36: /* WIM */
		case 37: /* TBR */
			break;
		default:
			goto finish;
		}

		pos++;
		count--;
	}
finish:
	pos *= sizeof(reg);
	count *= sizeof(reg);

	return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					 38 * sizeof(reg), -1);
}
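
/* Compat regset get/set for the 32-bit floating point state: 32
 * single-precision registers, a padding word, the lower 32 bits of FSR,
 * and a status word encoding the (always empty) FP queue and whether
 * the FPU is enabled.  On set, FPRS_FEF and FPRS_DL are forced on.
 */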
static int fpregs32_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	const unsigned long *fpregs = task_thread_info(target)->fpregs;
	compat_ulong_t enabled;
	unsigned long fprs;
	compat_ulong_t fsr;
	int ret = 0;

	if (target == current)
		save_and_clear_fpu();

	fprs = task_thread_info(target)->fpsaved[0];
	if (fprs & FPRS_FEF) {
		fsr = task_thread_info(target)->xfsr[0];
		enabled = 1;
	} else {
		fsr = 0;
		enabled = 0;
	}

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  fpregs,
				  0, 32 * sizeof(u32));

	if (!ret)
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       32 * sizeof(u32),
					       33 * sizeof(u32));
	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &fsr,
					  33 * sizeof(u32),
					  34 * sizeof(u32));

	if (!ret) {
		compat_ulong_t val;

		val = (enabled << 8) | (8 << 16);
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &val,
					  34 * sizeof(u32),
					  35 * sizeof(u32));
	}

	if (!ret)
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       35 * sizeof(u32), -1);

	return ret;
}

static int fpregs32_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	unsigned long *fpregs = task_thread_info(target)->fpregs;
	unsigned long fprs;
	int ret;

	if (target == current)
		save_and_clear_fpu();

	fprs = task_thread_info(target)->fpsaved[0];

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 fpregs,
				 0, 32 * sizeof(u32));
	if (!ret)
		user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					  32 * sizeof(u32),
					  33 * sizeof(u32));
	if (!ret && count > 0) {
		compat_ulong_t fsr;
		unsigned long val;

		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &fsr,
					 33 * sizeof(u32),
					 34 * sizeof(u32));
		if (!ret) {
			val = task_thread_info(target)->xfsr[0];
			val &= 0xffffffff00000000UL;
			val |= fsr;
			task_thread_info(target)->xfsr[0] = val;
		}
	}

	fprs |= (FPRS_FEF | FPRS_DL);
	task_thread_info(target)->fpsaved[0] = fprs;

	if (!ret)
		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						34 * sizeof(u32), -1);
	return ret;
}

static const struct user_regset sparc32_regsets[] = {
	/* Format is:
	 *	G0 --> G7
	 *	O0 --> O7
	 *	L0 --> L7
	 *	I0 --> I7
	 *	PSR, PC, nPC, Y, WIM, TBR
	 */
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = 38,
		.size = sizeof(u32), .align = sizeof(u32),
		.get = genregs32_get, .set = genregs32_set
	},
	/* Format is:
	 *	F0 --> F31
	 *	empty 32-bit word
	 *	FSR (32-bit word)
	 *	FPU QUEUE COUNT (8-bit char)
	 *	FPU QUEUE ENTRYSIZE (8-bit char)
	 *	FPU ENABLED (8-bit char)
	 *	empty 8-bit char
	 *	FPU QUEUE (64 32-bit ints)
	 */
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = 99,
		.size = sizeof(u32), .align = sizeof(u32),
		.get = fpregs32_get, .set = fpregs32_set
	},
};

static const struct user_regset_view user_sparc32_view = {
	.name = "sparc", .e_machine = EM_SPARC,
	.regsets = sparc32_regsets, .n = ARRAY_SIZE(sparc32_regsets)
};
#endif /* CONFIG_COMPAT */

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	if (test_tsk_thread_flag(task, TIF_32BIT))
		return &user_sparc32_view;
#endif
	return &user_sparc64_view;
}

#ifdef CONFIG_COMPAT
struct compat_fps {
	unsigned int regs[32];
	unsigned int fsr;
	unsigned int flags;
	unsigned int extra;
	unsigned int fpqd;
	struct compat_fq {
		unsigned int insnaddr;
		unsigned int insn;
	} fpq[16];
};
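
/* 32-bit ptrace entry point: maps the sparc-specific compat requests
 * (PTRACE_GETREGS/SETREGS, PTRACE_GETFPREGS/SETFPREGS and the
 * READ/WRITE TEXT/DATA requests) onto the regset interface and
 * ptrace_readdata()/ptrace_writedata(); everything else is handed to
 * compat_ptrace_request().  The second address argument used by the
 * READ/WRITE requests is taken from the tracer's saved UREG_I4.
 */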
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	const struct user_regset_view *view = task_user_regset_view(current);
	compat_ulong_t caddr2 = task_pt_regs(current)->u_regs[UREG_I4];
	struct pt_regs32 __user *pregs;
	struct compat_fps __user *fps;
	unsigned long addr2 = caddr2;
	unsigned long addr = caddr;
	unsigned long data = cdata;
	int ret;

	pregs = (struct pt_regs32 __user *) addr;
	fps = (struct compat_fps __user *) addr;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = (addr != 0) ? -EIO : 0;
		break;

	case PTRACE_GETREGS:
		ret = copy_regset_to_user(child, view, REGSET_GENERAL,
					  32 * sizeof(u32),
					  4 * sizeof(u32),
					  &pregs->psr);
		if (!ret)
			ret = copy_regset_to_user(child, view, REGSET_GENERAL,
						  1 * sizeof(u32),
						  15 * sizeof(u32),
						  &pregs->u_regs[0]);
		break;

	case PTRACE_SETREGS:
		ret = copy_regset_from_user(child, view, REGSET_GENERAL,
					    32 * sizeof(u32),
					    4 * sizeof(u32),
					    &pregs->psr);
		if (!ret)
			ret = copy_regset_from_user(child, view, REGSET_GENERAL,
						    1 * sizeof(u32),
						    15 * sizeof(u32),
						    &pregs->u_regs[0]);
		break;

	case PTRACE_GETFPREGS:
		ret = copy_regset_to_user(child, view, REGSET_FP,
					  0 * sizeof(u32),
					  32 * sizeof(u32),
					  &fps->regs[0]);
		if (!ret)
			ret = copy_regset_to_user(child, view, REGSET_FP,
						  33 * sizeof(u32),
						  1 * sizeof(u32),
						  &fps->fsr);
		if (!ret) {
			if (__put_user(0, &fps->flags) ||
			    __put_user(0, &fps->extra) ||
			    __put_user(0, &fps->fpqd) ||
			    clear_user(&fps->fpq[0], 32 * sizeof(unsigned int)))
				ret = -EFAULT;
		}
		break;

	case PTRACE_SETFPREGS:
		ret = copy_regset_from_user(child, view, REGSET_FP,
					    0 * sizeof(u32),
					    32 * sizeof(u32),
					    &fps->regs[0]);
		if (!ret)
			ret = copy_regset_from_user(child, view, REGSET_FP,
						    33 * sizeof(u32),
						    1 * sizeof(u32),
						    &fps->fsr);
		break;

	case PTRACE_READTEXT:
	case PTRACE_READDATA:
		ret = ptrace_readdata(child, addr,
				      (char __user *)addr2, data);
		if (ret == data)
			ret = 0;
		else if (ret >= 0)
			ret = -EIO;
		break;

	case PTRACE_WRITETEXT:
	case PTRACE_WRITEDATA:
		ret = ptrace_writedata(child, (char __user *) addr2,
				       addr, data);
		if (ret == data)
			ret = 0;
		else if (ret >= 0)
			ret = -EIO;
		break;

	default:
		if (request == PTRACE_SPARC_DETACH)
			request = PTRACE_DETACH;
		ret = compat_ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
#endif /* CONFIG_COMPAT */

struct fps {
	unsigned int regs[64];
	unsigned long fsr;
};
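
/* 64-bit ptrace entry point: implements the sparc-specific
 * PTRACE_GETREGS64/SETREGS64 and PTRACE_GETFPREGS64/SETFPREGS64
 * requests in terms of the regsets above, plus the READ/WRITE
 * TEXT/DATA requests; everything else falls through to
 * ptrace_request().
 */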
long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
	const struct user_regset_view *view = task_user_regset_view(current);
	unsigned long addr2 = task_pt_regs(current)->u_regs[UREG_I4];
	struct pt_regs __user *pregs;
	struct fps __user *fps;
	int ret;

	pregs = (struct pt_regs __user *) (unsigned long) addr;
	fps = (struct fps __user *) (unsigned long) addr;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = (addr != 0) ? -EIO : 0;
		break;

	case PTRACE_GETREGS64:
		ret = copy_regset_to_user(child, view, REGSET_GENERAL,
					  1 * sizeof(u64),
					  15 * sizeof(u64),
					  &pregs->u_regs[0]);
		if (!ret) {
			/* XXX doesn't handle 'y' register correctly XXX */
			ret = copy_regset_to_user(child, view, REGSET_GENERAL,
						  32 * sizeof(u64),
						  4 * sizeof(u64),
						  &pregs->tstate);
		}
		break;

	case PTRACE_SETREGS64:
		ret = copy_regset_from_user(child, view, REGSET_GENERAL,
					    1 * sizeof(u64),
					    15 * sizeof(u64),
					    &pregs->u_regs[0]);
		if (!ret) {
			/* XXX doesn't handle 'y' register correctly XXX */
			ret = copy_regset_from_user(child, view, REGSET_GENERAL,
						    32 * sizeof(u64),
						    4 * sizeof(u64),
						    &pregs->tstate);
		}
		break;

	case PTRACE_GETFPREGS64:
		ret = copy_regset_to_user(child, view, REGSET_FP,
					  0 * sizeof(u64),
					  33 * sizeof(u64),
					  fps);
		break;

	case PTRACE_SETFPREGS64:
		ret = copy_regset_from_user(child, view, REGSET_FP,
					    0 * sizeof(u64),
					    33 * sizeof(u64),
					    fps);
		break;

	case PTRACE_READTEXT:
	case PTRACE_READDATA:
		ret = ptrace_readdata(child, addr,
				      (char __user *)addr2, data);
		if (ret == data)
			ret = 0;
		else if (ret >= 0)
			ret = -EIO;
		break;

	case PTRACE_WRITETEXT:
	case PTRACE_WRITEDATA:
		ret = ptrace_writedata(child, (char __user *) addr2,
				       addr, data);
		if (ret == data)
			ret = 0;
		else if (ret >= 0)
			ret = -EIO;
		break;

	default:
		if (request == PTRACE_SPARC_DETACH)
			request = PTRACE_DETACH;
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
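
/* Syscall tracing hooks, called from the syscall entry/exit path.
 * On entry: run the seccomp check, then the ptrace (tracehook),
 * tracepoint and audit hooks.  On exit: notify audit, the tracepoint
 * and then ptrace.  The syscall number is taken from the saved %g1.
 */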
asmlinkage int syscall_trace_enter(struct pt_regs *regs)
{
	int ret = 0;

	/* do the secure computing check first */
	secure_computing(regs->u_regs[UREG_G1]);

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		ret = tracehook_report_syscall_entry(regs);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->u_regs[UREG_G1]);

	if (unlikely(current->audit_context) && !ret)
		audit_syscall_entry((test_thread_flag(TIF_32BIT) ?
				     AUDIT_ARCH_SPARC :
				     AUDIT_ARCH_SPARC64),
				    regs->u_regs[UREG_G1],
				    regs->u_regs[UREG_I0],
				    regs->u_regs[UREG_I1],
				    regs->u_regs[UREG_I2],
				    regs->u_regs[UREG_I3]);

	return ret;
}

asmlinkage void syscall_trace_leave(struct pt_regs *regs)
{
	if (unlikely(current->audit_context)) {
		unsigned long tstate = regs->tstate;
		int result = AUDITSC_SUCCESS;

		if (unlikely(tstate & (TSTATE_XCARRY | TSTATE_ICARRY)))
			result = AUDITSC_FAILURE;
		audit_syscall_exit(result, regs->u_regs[UREG_I0]);
	}

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->u_regs[UREG_G1]);

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, 0);
}