ptrace.c 30 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250125112521253125412551256125712581259126012611262126312641265126612671268126912701271
  1. /* ptrace.c: Sparc process tracing support.
  2. *
  3. * Copyright (C) 1996, 2008 David S. Miller (davem@davemloft.net)
  4. * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
  5. *
  6. * Based upon code written by Ross Biro, Linus Torvalds, Bob Manson,
  7. * and David Mosberger.
  8. *
  9. * Added Linux support -miguel (weird, eh?, the original code was meant
  10. * to emulate SunOS).
  11. */
  12. #include <linux/kernel.h>
  13. #include <linux/sched.h>
  14. #include <linux/mm.h>
  15. #include <linux/errno.h>
  16. #include <linux/ptrace.h>
  17. #include <linux/user.h>
  18. #include <linux/smp.h>
  19. #include <linux/smp_lock.h>
  20. #include <linux/security.h>
  21. #include <linux/seccomp.h>
  22. #include <linux/audit.h>
  23. #include <linux/signal.h>
  24. #include <linux/regset.h>
  25. #include <linux/compat.h>
  26. #include <linux/elf.h>
  27. #include <asm/asi.h>
  28. #include <asm/pgtable.h>
  29. #include <asm/system.h>
  30. #include <asm/uaccess.h>
  31. #include <asm/psrcompat.h>
  32. #include <asm/visasm.h>
  33. #include <asm/spitfire.h>
  34. #include <asm/page.h>
  35. #include <asm/cpudata.h>
  36. /* Returning from ptrace is a bit tricky because the syscall return
  37. * low level code assumes any value returned which is negative and
  38. * is a valid errno will mean setting the condition codes to indicate
  39. * an error return. This doesn't work, so we have this hook.
  40. */
/* Report a ptrace error to the tracee's registers: %o0 gets the errno
 * value, the carry bits in %tstate are set (the Sparc syscall-error
 * convention), and the PC/nPC pair is advanced past the trap insn.
 */
static inline void pt_error_return(struct pt_regs *regs, unsigned long error)
{
	regs->u_regs[UREG_I0] = error;	/* errno lands in %o0 */
	regs->tstate |= (TSTATE_ICARRY | TSTATE_XCARRY);	/* carry set == error */
	regs->tpc = regs->tnpc;		/* step past the trap instruction */
	regs->tnpc += 4;
}
/* Report a successful ptrace result (SunOS style): the value goes in
 * %o0, the carry bits are cleared, and PC/nPC advance past the trap.
 */
static inline void pt_succ_return(struct pt_regs *regs, unsigned long value)
{
	regs->u_regs[UREG_I0] = value;	/* result in %o0 */
	regs->tstate &= ~(TSTATE_ICARRY | TSTATE_XCARRY);	/* carry clear == success */
	regs->tpc = regs->tnpc;		/* step past the trap instruction */
	regs->tnpc += 4;
}
  55. static inline void
  56. pt_succ_return_linux(struct pt_regs *regs, unsigned long value, void __user *addr)
  57. {
  58. if (test_thread_flag(TIF_32BIT)) {
  59. if (put_user(value, (unsigned int __user *) addr)) {
  60. pt_error_return(regs, EFAULT);
  61. return;
  62. }
  63. } else {
  64. if (put_user(value, (long __user *) addr)) {
  65. pt_error_return(regs, EFAULT);
  66. return;
  67. }
  68. }
  69. regs->u_regs[UREG_I0] = 0;
  70. regs->tstate &= ~(TSTATE_ICARRY | TSTATE_XCARRY);
  71. regs->tpc = regs->tnpc;
  72. regs->tnpc += 4;
  73. }
  74. static void
  75. pt_os_succ_return (struct pt_regs *regs, unsigned long val, void __user *addr)
  76. {
  77. if (current->personality == PER_SUNOS)
  78. pt_succ_return (regs, val);
  79. else
  80. pt_succ_return_linux (regs, val, addr);
  81. }
  82. /* #define ALLOW_INIT_TRACING */
  83. /* #define DEBUG_PTRACE */
#ifdef DEBUG_PTRACE
/* Human-readable names for ptrace request codes 0..24, used only by
 * the DEBUG_PTRACE printk in do_ptrace().
 */
char *pt_rq [] = {
	/* 0  */ "TRACEME", "PEEKTEXT", "PEEKDATA", "PEEKUSR",
	/* 4  */ "POKETEXT", "POKEDATA", "POKEUSR", "CONT",
	/* 8  */ "KILL", "SINGLESTEP", "SUNATTACH", "SUNDETACH",
	/* 12 */ "GETREGS", "SETREGS", "GETFPREGS", "SETFPREGS",
	/* 16 */ "READDATA", "WRITEDATA", "READTEXT", "WRITETEXT",
	/* 20 */ "GETFPAREGS", "SETFPAREGS", "unknown", "unknown",
	/* 24 */ "SYSCALL", ""
};
#endif
  95. /*
  96. * Called by kernel/ptrace.c when detaching..
  97. *
  98. * Make sure single step bits etc are not set.
  99. */
void ptrace_disable(struct task_struct *child)
{
	/* nothing to do -- this port keeps no per-task single-step
	 * state that would need clearing on detach.
	 */
}
/* To get the necessary page struct, access_process_vm() first calls
 * get_user_pages().  This has done a flush_dcache_page() on the
 * accessed page.  Then our caller (copy_{to,from}_user_page()) did
 * a memcpy to read/write the data from that page.
 *
 * Now, the only thing we have to do is:
 * 1) flush the D-cache if it's possible that an illegal alias
 *    has been created
 * 2) flush the I-cache if this is pre-cheetah and we did a write
 */
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			 unsigned long uaddr, void *kaddr,
			 unsigned long len, int write)
{
	BUG_ON(len > PAGE_SIZE);

	/* sun4v (hypervisor) chips: no manual cache maintenance here. */
	if (tlb_type == hypervisor)
		return;

#ifdef DCACHE_ALIASING_POSSIBLE
	/* If bit 13 of the kernel address we used to access the
	 * user page is the same as the virtual address that page
	 * is mapped to in the user's address space, we can skip the
	 * D-cache flush.
	 */
	if ((uaddr ^ (unsigned long) kaddr) & (1UL << 13)) {
		unsigned long start = __pa(kaddr);
		unsigned long end = start + len;
		unsigned long dcache_line_size;

		dcache_line_size = local_cpu_data().dcache_line_size;

		if (tlb_type == spitfire) {
			/* Spitfire: zap the D-cache tag for each line
			 * covering the physical range.
			 */
			for (; start < end; start += dcache_line_size)
				spitfire_put_dcache_tag(start & 0x3fe0, 0x0);
		} else {
			/* Cheetah and later: invalidate line by line via
			 * the ASI_DCACHE_INVALIDATE address space.
			 */
			start &= ~(dcache_line_size - 1);
			for (; start < end; start += dcache_line_size)
				__asm__ __volatile__(
					"stxa %%g0, [%0] %1\n\t"
					"membar #Sync"
					: /* no outputs */
					: "r" (start),
					  "i" (ASI_DCACHE_INVALIDATE));
		}
	}
#endif
	/* Per the comment above: pre-cheetah (spitfire) needs an I-cache
	 * flush after instructions have been written.
	 */
	if (write && tlb_type == spitfire) {
		unsigned long start = (unsigned long) kaddr;
		unsigned long end = start + len;
		unsigned long icache_line_size;

		icache_line_size = local_cpu_data().icache_line_size;

		for (; start < end; start += icache_line_size)
			flushi(start);
	}
}
/* Indices into the sparc64_regsets[] and sparc32_regsets[] tables below. */
enum sparc_regset {
	REGSET_GENERAL,
	REGSET_FP,
};
  160. static int genregs64_get(struct task_struct *target,
  161. const struct user_regset *regset,
  162. unsigned int pos, unsigned int count,
  163. void *kbuf, void __user *ubuf)
  164. {
  165. const struct pt_regs *regs = task_pt_regs(target);
  166. int ret;
  167. if (target == current)
  168. flushw_user();
  169. ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
  170. regs->u_regs,
  171. 0, 16 * sizeof(u64));
  172. if (!ret) {
  173. unsigned long __user *reg_window = (unsigned long __user *)
  174. (regs->u_regs[UREG_I6] + STACK_BIAS);
  175. unsigned long window[16];
  176. if (copy_from_user(window, reg_window, sizeof(window)))
  177. return -EFAULT;
  178. ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
  179. window,
  180. 16 * sizeof(u64),
  181. 32 * sizeof(u64));
  182. }
  183. if (!ret) {
  184. /* TSTATE, TPC, TNPC */
  185. ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
  186. &regs->tstate,
  187. 32 * sizeof(u64),
  188. 35 * sizeof(u64));
  189. }
  190. if (!ret) {
  191. unsigned long y = regs->y;
  192. ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
  193. &y,
  194. 35 * sizeof(u64),
  195. 36 * sizeof(u64));
  196. }
  197. if (!ret)
  198. ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
  199. 36 * sizeof(u64), -1);
  200. return ret;
  201. }
  202. static int genregs64_set(struct task_struct *target,
  203. const struct user_regset *regset,
  204. unsigned int pos, unsigned int count,
  205. const void *kbuf, const void __user *ubuf)
  206. {
  207. struct pt_regs *regs = task_pt_regs(target);
  208. int ret;
  209. if (target == current)
  210. flushw_user();
  211. ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
  212. regs->u_regs,
  213. 0, 16 * sizeof(u64));
  214. if (!ret && count > 0) {
  215. unsigned long __user *reg_window = (unsigned long __user *)
  216. (regs->u_regs[UREG_I6] + STACK_BIAS);
  217. unsigned long window[16];
  218. if (copy_from_user(window, reg_window, sizeof(window)))
  219. return -EFAULT;
  220. ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
  221. window,
  222. 16 * sizeof(u64),
  223. 32 * sizeof(u64));
  224. if (!ret &&
  225. copy_to_user(reg_window, window, sizeof(window)))
  226. return -EFAULT;
  227. }
  228. if (!ret && count > 0) {
  229. unsigned long tstate;
  230. /* TSTATE */
  231. ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
  232. &tstate,
  233. 32 * sizeof(u64),
  234. 33 * sizeof(u64));
  235. if (!ret) {
  236. /* Only the condition codes can be modified
  237. * in the %tstate register.
  238. */
  239. tstate &= (TSTATE_ICC | TSTATE_XCC);
  240. regs->tstate &= ~(TSTATE_ICC | TSTATE_XCC);
  241. regs->tstate |= tstate;
  242. }
  243. }
  244. if (!ret) {
  245. /* TPC, TNPC */
  246. ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
  247. &regs->tpc,
  248. 33 * sizeof(u64),
  249. 35 * sizeof(u64));
  250. }
  251. if (!ret) {
  252. unsigned long y;
  253. ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
  254. &y,
  255. 35 * sizeof(u64),
  256. 36 * sizeof(u64));
  257. if (!ret)
  258. regs->y = y;
  259. }
  260. if (!ret)
  261. ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
  262. 36 * sizeof(u64), -1);
  263. return ret;
  264. }
/* Fetch the 64-bit FPU register set (35 u64 slots): %f0-%f63 as 32
 * 64-bit slots, then FSR, GSR and FPRS.  Halves of the register file
 * that were never saved (per the FPRS_DL/FPRS_DU bits) read as zero.
 */
static int fpregs64_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	const unsigned long *fpregs = task_thread_info(target)->fpregs;
	unsigned long fprs, fsr, gsr;
	int ret;

	/* Make sure the thread-info FPU save area is up to date. */
	if (target == current)
		save_and_clear_fpu();

	fprs = task_thread_info(target)->fpsaved[0];

	/* Lower half (%f0-%f31): valid only if FPRS_DL is set. */
	if (fprs & FPRS_DL)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  fpregs,
					  0, 16 * sizeof(u64));
	else
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       0,
					       16 * sizeof(u64));

	if (!ret) {
		/* Upper half (%f32-%f62): valid only if FPRS_DU is set. */
		if (fprs & FPRS_DU)
			ret = user_regset_copyout(&pos, &count,
						  &kbuf, &ubuf,
						  fpregs + 16,
						  16 * sizeof(u64),
						  32 * sizeof(u64));
		else
			ret = user_regset_copyout_zero(&pos, &count,
						       &kbuf, &ubuf,
						       16 * sizeof(u64),
						       32 * sizeof(u64));
	}

	/* FSR/GSR are only meaningful if the FPU was enabled. */
	if (fprs & FPRS_FEF) {
		fsr = task_thread_info(target)->xfsr[0];
		gsr = task_thread_info(target)->gsr[0];
	} else {
		fsr = gsr = 0;
	}

	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &fsr,
					  32 * sizeof(u64),
					  33 * sizeof(u64));
	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &gsr,
					  33 * sizeof(u64),
					  34 * sizeof(u64));
	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &fprs,
					  34 * sizeof(u64),
					  35 * sizeof(u64));

	if (!ret)
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       35 * sizeof(u64), -1);

	return ret;
}
/* Store the 64-bit FPU register set; same 35-slot layout as
 * fpregs64_get().  After a set, the saved FPRS always has FEF/DL/DU
 * forced on so the restored state is considered fully valid.
 */
static int fpregs64_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	unsigned long *fpregs = task_thread_info(target)->fpregs;
	unsigned long fprs;
	int ret;

	/* Flush live FPU state into the thread-info save area first. */
	if (target == current)
		save_and_clear_fpu();

	/* %f0-%f63 (32 64-bit slots). */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 fpregs,
				 0, 32 * sizeof(u64));
	/* FSR */
	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 task_thread_info(target)->xfsr,
					 32 * sizeof(u64),
					 33 * sizeof(u64));
	/* GSR */
	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 task_thread_info(target)->gsr,
					 33 * sizeof(u64),
					 34 * sizeof(u64));

	fprs = task_thread_info(target)->fpsaved[0];
	if (!ret && count > 0) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &fprs,
					 34 * sizeof(u64),
					 35 * sizeof(u64));
	}

	/* Force both register-file halves (and the FPU) valid, whatever
	 * FPRS value the tracer supplied.
	 */
	fprs |= (FPRS_FEF | FPRS_DL | FPRS_DU);
	task_thread_info(target)->fpsaved[0] = fprs;

	if (!ret)
		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						35 * sizeof(u64), -1);
	return ret;
}
  360. static const struct user_regset sparc64_regsets[] = {
  361. /* Format is:
  362. * G0 --> G7
  363. * O0 --> O7
  364. * L0 --> L7
  365. * I0 --> I7
  366. * TSTATE, TPC, TNPC, Y
  367. */
  368. [REGSET_GENERAL] = {
  369. .core_note_type = NT_PRSTATUS,
  370. .n = 36 * sizeof(u64),
  371. .size = sizeof(u64), .align = sizeof(u64),
  372. .get = genregs64_get, .set = genregs64_set
  373. },
  374. /* Format is:
  375. * F0 --> F63
  376. * FSR
  377. * GSR
  378. * FPRS
  379. */
  380. [REGSET_FP] = {
  381. .core_note_type = NT_PRFPREG,
  382. .n = 35 * sizeof(u64),
  383. .size = sizeof(u64), .align = sizeof(u64),
  384. .get = fpregs64_get, .set = fpregs64_set
  385. },
  386. };
/* Regset view advertised for native 64-bit sparc tasks. */
static const struct user_regset_view user_sparc64_view = {
	.name = "sparc64", .e_machine = EM_SPARCV9,
	.regsets = sparc64_regsets, .n = ARRAY_SIZE(sparc64_regsets)
};
/* Fetch the 32-bit (compat) general register set, 38 u32 slots:
 * slots 0-15 are %g/%o from the pt_regs, 16-31 are %l/%i from the
 * register window on the user stack, then PSR, PC, NPC, Y, and
 * always-zero WIM/TBR.
 */
static int genregs32_get(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 void *kbuf, void __user *ubuf)
{
	const struct pt_regs *regs = task_pt_regs(target);
	compat_ulong_t __user *reg_window;
	compat_ulong_t *k = kbuf;
	compat_ulong_t __user *u = ubuf;
	compat_ulong_t reg;

	/* Spill live register windows so the stack copy is current. */
	if (target == current)
		flushw_user();

	/* Work in 32-bit slot units from here on. */
	pos /= sizeof(reg);
	count /= sizeof(reg);

	if (kbuf) {
		for (; count > 0 && pos < 16; count--)
			*k++ = regs->u_regs[pos++];

		/* NOTE(review): get_user() reads the *current* address
		 * space; when target != current this presumably does not
		 * reach the tracee's stack window -- verify against the
		 * access_process_vm()-based upstream rework.
		 */
		reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
		for (; count > 0 && pos < 32; count--) {
			if (get_user(*k++, &reg_window[pos++]))
				return -EFAULT;
		}
	} else {
		for (; count > 0 && pos < 16; count--) {
			if (put_user((compat_ulong_t) regs->u_regs[pos++], u++))
				return -EFAULT;
		}

		reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
		for (; count > 0 && pos < 32; count--) {
			if (get_user(reg, &reg_window[pos++]) ||
			    put_user(reg, u++))
				return -EFAULT;
		}
	}

	/* Slots 32-37: synthesized control registers. */
	while (count > 0) {
		switch (pos) {
		case 32: /* PSR */
			reg = tstate_to_psr(regs->tstate);
			break;
		case 33: /* PC */
			reg = regs->tpc;
			break;
		case 34: /* NPC */
			reg = regs->tnpc;
			break;
		case 35: /* Y */
			reg = regs->y;
			break;
		case 36: /* WIM */
		case 37: /* TBR */
			reg = 0;
			break;
		default:
			goto finish;
		}

		if (kbuf)
			*k++ = reg;
		else if (put_user(reg, u++))
			return -EFAULT;
		pos++;
		count--;
	}
finish:
	/* Convert back to byte units for the generic helper. */
	pos *= sizeof(reg);
	count *= sizeof(reg);

	return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					38 * sizeof(reg), -1);
}
/* Store the 32-bit (compat) general register set; same 38-slot layout
 * as genregs32_get().  Only the ICC/XCC condition codes of %tstate can
 * be changed through the PSR slot; WIM/TBR writes are ignored.
 */
static int genregs32_set(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	compat_ulong_t __user *reg_window;
	const compat_ulong_t *k = kbuf;
	const compat_ulong_t __user *u = ubuf;
	compat_ulong_t reg;

	/* Spill live register windows before touching the stack copy. */
	if (target == current)
		flushw_user();

	/* Work in 32-bit slot units from here on. */
	pos /= sizeof(reg);
	count /= sizeof(reg);

	if (kbuf) {
		for (; count > 0 && pos < 16; count--)
			regs->u_regs[pos++] = *k++;

		/* NOTE(review): put_user() writes the *current* address
		 * space; when target != current this presumably does not
		 * reach the tracee's stack window -- verify against the
		 * access_process_vm()-based upstream rework.
		 */
		reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
		for (; count > 0 && pos < 32; count--) {
			if (put_user(*k++, &reg_window[pos++]))
				return -EFAULT;
		}
	} else {
		for (; count > 0 && pos < 16; count--) {
			if (get_user(reg, u++))
				return -EFAULT;
			regs->u_regs[pos++] = reg;
		}

		reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
		for (; count > 0 && pos < 32; count--) {
			if (get_user(reg, u++) ||
			    put_user(reg, &reg_window[pos++]))
				return -EFAULT;
		}
	}

	/* Slots 32-37: synthesized control registers. */
	while (count > 0) {
		unsigned long tstate;

		if (kbuf)
			reg = *k++;
		else if (get_user(reg, u++))
			return -EFAULT;

		switch (pos) {
		case 32: /* PSR */
			/* Only the condition codes are taken from the
			 * supplied PSR value.
			 */
			tstate = regs->tstate;
			tstate &= ~(TSTATE_ICC | TSTATE_XCC);
			tstate |= psr_to_tstate_icc(reg);
			regs->tstate = tstate;
			break;
		case 33: /* PC */
			regs->tpc = reg;
			break;
		case 34: /* NPC */
			regs->tnpc = reg;
			break;
		case 35: /* Y */
			regs->y = reg;
			break;
		case 36: /* WIM */
		case 37: /* TBR */
			break;
		default:
			goto finish;
		}

		pos++;
		count--;
	}
finish:
	/* Convert back to byte units for the generic helper. */
	pos *= sizeof(reg);
	count *= sizeof(reg);

	return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					 38 * sizeof(reg), -1);
}
/* Fetch the 32-bit (compat) FPU register set, 99 u32 slots:
 * %f0-%f31, a zero pad word, the low 32 bits of FSR, a packed
 * qcnt/entrysize/enabled word, then a zeroed FPU queue (see the
 * sparc32_regsets[] layout comment).
 */
static int fpregs32_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	const unsigned long *fpregs = task_thread_info(target)->fpregs;
	compat_ulong_t enabled;
	unsigned long fprs;
	compat_ulong_t fsr;
	int ret = 0;

	/* Make sure the thread-info FPU save area is up to date. */
	if (target == current)
		save_and_clear_fpu();

	/* FSR and the "enabled" flag are valid only if FEF is set. */
	fprs = task_thread_info(target)->fpsaved[0];
	if (fprs & FPRS_FEF) {
		fsr = task_thread_info(target)->xfsr[0];
		enabled = 1;
	} else {
		fsr = 0;
		enabled = 0;
	}

	/* %f0-%f31 (32 u32 slots). */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  fpregs,
				  0, 32 * sizeof(u32));

	/* Unused pad word between the registers and the FSR. */
	if (!ret)
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       32 * sizeof(u32),
					       33 * sizeof(u32));
	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &fsr,
					  33 * sizeof(u32),
					  34 * sizeof(u32));

	if (!ret) {
		compat_ulong_t val;

		/* Packed byte fields per the regset layout: queue count
		 * (0) in byte 0, enabled flag in byte 1, queue entry
		 * size (8) in byte 2.
		 */
		val = (enabled << 8) | (8 << 16);
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &val,
					  34 * sizeof(u32),
					  35 * sizeof(u32));
	}

	/* The FPU queue itself reads as zero. */
	if (!ret)
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       35 * sizeof(u32), -1);

	return ret;
}
  576. static int fpregs32_set(struct task_struct *target,
  577. const struct user_regset *regset,
  578. unsigned int pos, unsigned int count,
  579. const void *kbuf, const void __user *ubuf)
  580. {
  581. unsigned long *fpregs = task_thread_info(target)->fpregs;
  582. unsigned long fprs;
  583. int ret;
  584. if (target == current)
  585. save_and_clear_fpu();
  586. fprs = task_thread_info(target)->fpsaved[0];
  587. ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
  588. fpregs,
  589. 0, 32 * sizeof(u32));
  590. if (!ret)
  591. user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
  592. 32 * sizeof(u32),
  593. 33 * sizeof(u32));
  594. if (!ret && count > 0) {
  595. compat_ulong_t fsr;
  596. unsigned long val;
  597. ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
  598. &fsr,
  599. 33 * sizeof(u32),
  600. 34 * sizeof(u32));
  601. if (!ret) {
  602. val = task_thread_info(target)->xfsr[0];
  603. val &= 0xffffffff00000000UL;
  604. val |= fsr;
  605. task_thread_info(target)->xfsr[0] = val;
  606. }
  607. }
  608. fprs |= (FPRS_FEF | FPRS_DL);
  609. task_thread_info(target)->fpsaved[0] = fprs;
  610. if (!ret)
  611. ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
  612. 34 * sizeof(u32), -1);
  613. return ret;
  614. }
  615. static const struct user_regset sparc32_regsets[] = {
  616. /* Format is:
  617. * G0 --> G7
  618. * O0 --> O7
  619. * L0 --> L7
  620. * I0 --> I7
  621. * PSR, PC, nPC, Y, WIM, TBR
  622. */
  623. [REGSET_GENERAL] = {
  624. .core_note_type = NT_PRSTATUS,
  625. .n = 38 * sizeof(u32),
  626. .size = sizeof(u32), .align = sizeof(u32),
  627. .get = genregs32_get, .set = genregs32_set
  628. },
  629. /* Format is:
  630. * F0 --> F31
  631. * empty 32-bit word
  632. * FSR (32--bit word)
  633. * FPU QUEUE COUNT (8-bit char)
  634. * FPU QUEUE ENTRYSIZE (8-bit char)
  635. * FPU ENABLED (8-bit char)
  636. * empty 8-bit char
  637. * FPU QUEUE (64 32-bit ints)
  638. */
  639. [REGSET_FP] = {
  640. .core_note_type = NT_PRFPREG,
  641. .n = 99 * sizeof(u32),
  642. .size = sizeof(u32), .align = sizeof(u32),
  643. .get = fpregs32_get, .set = fpregs32_set
  644. },
  645. };
/* Regset view advertised for 32-bit compat (TIF_32BIT) tasks. */
static const struct user_regset_view user_sparc32_view = {
	.name = "sparc", .e_machine = EM_SPARC,
	.regsets = sparc32_regsets, .n = ARRAY_SIZE(sparc32_regsets)
};
  650. const struct user_regset_view *task_user_regset_view(struct task_struct *task)
  651. {
  652. if (test_tsk_thread_flag(task, TIF_32BIT))
  653. return &user_sparc32_view;
  654. return &user_sparc64_view;
  655. }
  656. asmlinkage void do_ptrace(struct pt_regs *regs)
  657. {
  658. int request = regs->u_regs[UREG_I0];
  659. pid_t pid = regs->u_regs[UREG_I1];
  660. unsigned long addr = regs->u_regs[UREG_I2];
  661. unsigned long data = regs->u_regs[UREG_I3];
  662. unsigned long addr2 = regs->u_regs[UREG_I4];
  663. struct task_struct *child;
  664. int ret;
  665. if (test_thread_flag(TIF_32BIT)) {
  666. addr &= 0xffffffffUL;
  667. data &= 0xffffffffUL;
  668. addr2 &= 0xffffffffUL;
  669. }
  670. lock_kernel();
  671. #ifdef DEBUG_PTRACE
  672. {
  673. char *s;
  674. if ((request >= 0) && (request <= 24))
  675. s = pt_rq [request];
  676. else
  677. s = "unknown";
  678. if (request == PTRACE_POKEDATA && data == 0x91d02001){
  679. printk ("do_ptrace: breakpoint pid=%d, addr=%016lx addr2=%016lx\n",
  680. pid, addr, addr2);
  681. } else
  682. printk("do_ptrace: rq=%s(%d) pid=%d addr=%016lx data=%016lx addr2=%016lx\n",
  683. s, request, pid, addr, data, addr2);
  684. }
  685. #endif
  686. if (request == PTRACE_TRACEME) {
  687. ret = ptrace_traceme();
  688. if (ret < 0)
  689. pt_error_return(regs, -ret);
  690. else
  691. pt_succ_return(regs, 0);
  692. goto out;
  693. }
  694. child = ptrace_get_task_struct(pid);
  695. if (IS_ERR(child)) {
  696. ret = PTR_ERR(child);
  697. pt_error_return(regs, -ret);
  698. goto out;
  699. }
  700. if ((current->personality == PER_SUNOS && request == PTRACE_SUNATTACH)
  701. || (current->personality != PER_SUNOS && request == PTRACE_ATTACH)) {
  702. if (ptrace_attach(child)) {
  703. pt_error_return(regs, EPERM);
  704. goto out_tsk;
  705. }
  706. pt_succ_return(regs, 0);
  707. goto out_tsk;
  708. }
  709. ret = ptrace_check_attach(child, request == PTRACE_KILL);
  710. if (ret < 0) {
  711. pt_error_return(regs, -ret);
  712. goto out_tsk;
  713. }
  714. if (!(test_thread_flag(TIF_32BIT)) &&
  715. ((request == PTRACE_READDATA64) ||
  716. (request == PTRACE_WRITEDATA64) ||
  717. (request == PTRACE_READTEXT64) ||
  718. (request == PTRACE_WRITETEXT64) ||
  719. (request == PTRACE_PEEKTEXT64) ||
  720. (request == PTRACE_POKETEXT64) ||
  721. (request == PTRACE_PEEKDATA64) ||
  722. (request == PTRACE_POKEDATA64))) {
  723. addr = regs->u_regs[UREG_G2];
  724. addr2 = regs->u_regs[UREG_G3];
  725. request -= 30; /* wheee... */
  726. }
  727. switch(request) {
  728. case PTRACE_PEEKUSR:
  729. if (addr != 0)
  730. pt_error_return(regs, EIO);
  731. else
  732. pt_succ_return(regs, 0);
  733. goto out_tsk;
  734. case PTRACE_PEEKTEXT: /* read word at location addr. */
  735. case PTRACE_PEEKDATA: {
  736. unsigned long tmp64;
  737. unsigned int tmp32;
  738. int res, copied;
  739. res = -EIO;
  740. if (test_thread_flag(TIF_32BIT)) {
  741. copied = access_process_vm(child, addr,
  742. &tmp32, sizeof(tmp32), 0);
  743. tmp64 = (unsigned long) tmp32;
  744. if (copied == sizeof(tmp32))
  745. res = 0;
  746. } else {
  747. copied = access_process_vm(child, addr,
  748. &tmp64, sizeof(tmp64), 0);
  749. if (copied == sizeof(tmp64))
  750. res = 0;
  751. }
  752. if (res < 0)
  753. pt_error_return(regs, -res);
  754. else
  755. pt_os_succ_return(regs, tmp64, (void __user *) data);
  756. goto out_tsk;
  757. }
  758. case PTRACE_POKETEXT: /* write the word at location addr. */
  759. case PTRACE_POKEDATA: {
  760. unsigned long tmp64;
  761. unsigned int tmp32;
  762. int copied, res = -EIO;
  763. if (test_thread_flag(TIF_32BIT)) {
  764. tmp32 = data;
  765. copied = access_process_vm(child, addr,
  766. &tmp32, sizeof(tmp32), 1);
  767. if (copied == sizeof(tmp32))
  768. res = 0;
  769. } else {
  770. tmp64 = data;
  771. copied = access_process_vm(child, addr,
  772. &tmp64, sizeof(tmp64), 1);
  773. if (copied == sizeof(tmp64))
  774. res = 0;
  775. }
  776. if (res < 0)
  777. pt_error_return(regs, -res);
  778. else
  779. pt_succ_return(regs, res);
  780. goto out_tsk;
  781. }
  782. case PTRACE_GETREGS: {
  783. struct pt_regs32 __user *pregs =
  784. (struct pt_regs32 __user *) addr;
  785. struct pt_regs *cregs = task_pt_regs(child);
  786. int rval;
  787. if (__put_user(tstate_to_psr(cregs->tstate), (&pregs->psr)) ||
  788. __put_user(cregs->tpc, (&pregs->pc)) ||
  789. __put_user(cregs->tnpc, (&pregs->npc)) ||
  790. __put_user(cregs->y, (&pregs->y))) {
  791. pt_error_return(regs, EFAULT);
  792. goto out_tsk;
  793. }
  794. for (rval = 1; rval < 16; rval++)
  795. if (__put_user(cregs->u_regs[rval], (&pregs->u_regs[rval - 1]))) {
  796. pt_error_return(regs, EFAULT);
  797. goto out_tsk;
  798. }
  799. pt_succ_return(regs, 0);
  800. #ifdef DEBUG_PTRACE
  801. printk ("PC=%lx nPC=%lx o7=%lx\n", cregs->tpc, cregs->tnpc, cregs->u_regs [15]);
  802. #endif
  803. goto out_tsk;
  804. }
  805. case PTRACE_GETREGS64: {
  806. struct pt_regs __user *pregs = (struct pt_regs __user *) addr;
  807. struct pt_regs *cregs = task_pt_regs(child);
  808. unsigned long tpc = cregs->tpc;
  809. int rval;
  810. if ((task_thread_info(child)->flags & _TIF_32BIT) != 0)
  811. tpc &= 0xffffffff;
  812. if (__put_user(cregs->tstate, (&pregs->tstate)) ||
  813. __put_user(tpc, (&pregs->tpc)) ||
  814. __put_user(cregs->tnpc, (&pregs->tnpc)) ||
  815. __put_user(cregs->y, (&pregs->y))) {
  816. pt_error_return(regs, EFAULT);
  817. goto out_tsk;
  818. }
  819. for (rval = 1; rval < 16; rval++)
  820. if (__put_user(cregs->u_regs[rval], (&pregs->u_regs[rval - 1]))) {
  821. pt_error_return(regs, EFAULT);
  822. goto out_tsk;
  823. }
  824. pt_succ_return(regs, 0);
  825. #ifdef DEBUG_PTRACE
  826. printk ("PC=%lx nPC=%lx o7=%lx\n", cregs->tpc, cregs->tnpc, cregs->u_regs [15]);
  827. #endif
  828. goto out_tsk;
  829. }
  830. case PTRACE_SETREGS: {
  831. struct pt_regs32 __user *pregs =
  832. (struct pt_regs32 __user *) addr;
  833. struct pt_regs *cregs = task_pt_regs(child);
  834. unsigned int psr, pc, npc, y;
  835. int i;
  836. /* Must be careful, tracing process can only set certain
  837. * bits in the psr.
  838. */
  839. if (__get_user(psr, (&pregs->psr)) ||
  840. __get_user(pc, (&pregs->pc)) ||
  841. __get_user(npc, (&pregs->npc)) ||
  842. __get_user(y, (&pregs->y))) {
  843. pt_error_return(regs, EFAULT);
  844. goto out_tsk;
  845. }
  846. cregs->tstate &= ~(TSTATE_ICC);
  847. cregs->tstate |= psr_to_tstate_icc(psr);
  848. if (!((pc | npc) & 3)) {
  849. cregs->tpc = pc;
  850. cregs->tnpc = npc;
  851. }
  852. cregs->y = y;
  853. for (i = 1; i < 16; i++) {
  854. if (__get_user(cregs->u_regs[i], (&pregs->u_regs[i-1]))) {
  855. pt_error_return(regs, EFAULT);
  856. goto out_tsk;
  857. }
  858. }
  859. pt_succ_return(regs, 0);
  860. goto out_tsk;
  861. }
  862. case PTRACE_SETREGS64: {
  863. struct pt_regs __user *pregs = (struct pt_regs __user *) addr;
  864. struct pt_regs *cregs = task_pt_regs(child);
  865. unsigned long tstate, tpc, tnpc, y;
  866. int i;
  867. /* Must be careful, tracing process can only set certain
  868. * bits in the psr.
  869. */
  870. if (__get_user(tstate, (&pregs->tstate)) ||
  871. __get_user(tpc, (&pregs->tpc)) ||
  872. __get_user(tnpc, (&pregs->tnpc)) ||
  873. __get_user(y, (&pregs->y))) {
  874. pt_error_return(regs, EFAULT);
  875. goto out_tsk;
  876. }
  877. if ((task_thread_info(child)->flags & _TIF_32BIT) != 0) {
  878. tpc &= 0xffffffff;
  879. tnpc &= 0xffffffff;
  880. }
  881. tstate &= (TSTATE_ICC | TSTATE_XCC);
  882. cregs->tstate &= ~(TSTATE_ICC | TSTATE_XCC);
  883. cregs->tstate |= tstate;
  884. if (!((tpc | tnpc) & 3)) {
  885. cregs->tpc = tpc;
  886. cregs->tnpc = tnpc;
  887. }
  888. cregs->y = y;
  889. for (i = 1; i < 16; i++) {
  890. if (__get_user(cregs->u_regs[i], (&pregs->u_regs[i-1]))) {
  891. pt_error_return(regs, EFAULT);
  892. goto out_tsk;
  893. }
  894. }
  895. pt_succ_return(regs, 0);
  896. goto out_tsk;
  897. }
  898. case PTRACE_GETFPREGS: {
  899. struct fps {
  900. unsigned int regs[32];
  901. unsigned int fsr;
  902. unsigned int flags;
  903. unsigned int extra;
  904. unsigned int fpqd;
  905. struct fq {
  906. unsigned int insnaddr;
  907. unsigned int insn;
  908. } fpq[16];
  909. };
  910. struct fps __user *fps = (struct fps __user *) addr;
  911. unsigned long *fpregs = task_thread_info(child)->fpregs;
  912. if (copy_to_user(&fps->regs[0], fpregs,
  913. (32 * sizeof(unsigned int))) ||
  914. __put_user(task_thread_info(child)->xfsr[0], (&fps->fsr)) ||
  915. __put_user(0, (&fps->fpqd)) ||
  916. __put_user(0, (&fps->flags)) ||
  917. __put_user(0, (&fps->extra)) ||
  918. clear_user(&fps->fpq[0], 32 * sizeof(unsigned int))) {
  919. pt_error_return(regs, EFAULT);
  920. goto out_tsk;
  921. }
  922. pt_succ_return(regs, 0);
  923. goto out_tsk;
  924. }
  925. case PTRACE_GETFPREGS64: {
  926. struct fps {
  927. unsigned int regs[64];
  928. unsigned long fsr;
  929. };
  930. struct fps __user *fps = (struct fps __user *) addr;
  931. unsigned long *fpregs = task_thread_info(child)->fpregs;
  932. if (copy_to_user(&fps->regs[0], fpregs,
  933. (64 * sizeof(unsigned int))) ||
  934. __put_user(task_thread_info(child)->xfsr[0], (&fps->fsr))) {
  935. pt_error_return(regs, EFAULT);
  936. goto out_tsk;
  937. }
  938. pt_succ_return(regs, 0);
  939. goto out_tsk;
  940. }
  941. case PTRACE_SETFPREGS: {
  942. struct fps {
  943. unsigned int regs[32];
  944. unsigned int fsr;
  945. unsigned int flags;
  946. unsigned int extra;
  947. unsigned int fpqd;
  948. struct fq {
  949. unsigned int insnaddr;
  950. unsigned int insn;
  951. } fpq[16];
  952. };
  953. struct fps __user *fps = (struct fps __user *) addr;
  954. unsigned long *fpregs = task_thread_info(child)->fpregs;
  955. unsigned fsr;
  956. if (copy_from_user(fpregs, &fps->regs[0],
  957. (32 * sizeof(unsigned int))) ||
  958. __get_user(fsr, (&fps->fsr))) {
  959. pt_error_return(regs, EFAULT);
  960. goto out_tsk;
  961. }
  962. task_thread_info(child)->xfsr[0] &= 0xffffffff00000000UL;
  963. task_thread_info(child)->xfsr[0] |= fsr;
  964. if (!(task_thread_info(child)->fpsaved[0] & FPRS_FEF))
  965. task_thread_info(child)->gsr[0] = 0;
  966. task_thread_info(child)->fpsaved[0] |= (FPRS_FEF | FPRS_DL);
  967. pt_succ_return(regs, 0);
  968. goto out_tsk;
  969. }
  970. case PTRACE_SETFPREGS64: {
  971. struct fps {
  972. unsigned int regs[64];
  973. unsigned long fsr;
  974. };
  975. struct fps __user *fps = (struct fps __user *) addr;
  976. unsigned long *fpregs = task_thread_info(child)->fpregs;
  977. if (copy_from_user(fpregs, &fps->regs[0],
  978. (64 * sizeof(unsigned int))) ||
  979. __get_user(task_thread_info(child)->xfsr[0], (&fps->fsr))) {
  980. pt_error_return(regs, EFAULT);
  981. goto out_tsk;
  982. }
  983. if (!(task_thread_info(child)->fpsaved[0] & FPRS_FEF))
  984. task_thread_info(child)->gsr[0] = 0;
  985. task_thread_info(child)->fpsaved[0] |= (FPRS_FEF | FPRS_DL | FPRS_DU);
  986. pt_succ_return(regs, 0);
  987. goto out_tsk;
  988. }
  989. case PTRACE_READTEXT:
  990. case PTRACE_READDATA: {
  991. int res = ptrace_readdata(child, addr,
  992. (char __user *)addr2, data);
  993. if (res == data) {
  994. pt_succ_return(regs, 0);
  995. goto out_tsk;
  996. }
  997. if (res >= 0)
  998. res = -EIO;
  999. pt_error_return(regs, -res);
  1000. goto out_tsk;
  1001. }
  1002. case PTRACE_WRITETEXT:
  1003. case PTRACE_WRITEDATA: {
  1004. int res = ptrace_writedata(child, (char __user *) addr2,
  1005. addr, data);
  1006. if (res == data) {
  1007. pt_succ_return(regs, 0);
  1008. goto out_tsk;
  1009. }
  1010. if (res >= 0)
  1011. res = -EIO;
  1012. pt_error_return(regs, -res);
  1013. goto out_tsk;
  1014. }
  1015. case PTRACE_SYSCALL: /* continue and stop at (return from) syscall */
  1016. addr = 1;
  1017. case PTRACE_CONT: { /* restart after signal. */
  1018. if (!valid_signal(data)) {
  1019. pt_error_return(regs, EIO);
  1020. goto out_tsk;
  1021. }
  1022. if (request == PTRACE_SYSCALL) {
  1023. set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
  1024. } else {
  1025. clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
  1026. }
  1027. child->exit_code = data;
  1028. #ifdef DEBUG_PTRACE
  1029. printk("CONT: %s [%d]: set exit_code = %x %lx %lx\n", child->comm,
  1030. child->pid, child->exit_code,
  1031. task_pt_regs(child)->tpc,
  1032. task_pt_regs(child)->tnpc);
  1033. #endif
  1034. wake_up_process(child);
  1035. pt_succ_return(regs, 0);
  1036. goto out_tsk;
  1037. }
  1038. /*
  1039. * make the child exit. Best I can do is send it a sigkill.
  1040. * perhaps it should be put in the status that it wants to
  1041. * exit.
  1042. */
  1043. case PTRACE_KILL: {
  1044. if (child->exit_state == EXIT_ZOMBIE) { /* already dead */
  1045. pt_succ_return(regs, 0);
  1046. goto out_tsk;
  1047. }
  1048. child->exit_code = SIGKILL;
  1049. wake_up_process(child);
  1050. pt_succ_return(regs, 0);
  1051. goto out_tsk;
  1052. }
  1053. case PTRACE_SUNDETACH: { /* detach a process that was attached. */
  1054. int error = ptrace_detach(child, data);
  1055. if (error) {
  1056. pt_error_return(regs, EIO);
  1057. goto out_tsk;
  1058. }
  1059. pt_succ_return(regs, 0);
  1060. goto out_tsk;
  1061. }
  1062. /* PTRACE_DUMPCORE unsupported... */
  1063. case PTRACE_GETEVENTMSG: {
  1064. int err;
  1065. if (test_thread_flag(TIF_32BIT))
  1066. err = put_user(child->ptrace_message,
  1067. (unsigned int __user *) data);
  1068. else
  1069. err = put_user(child->ptrace_message,
  1070. (unsigned long __user *) data);
  1071. if (err)
  1072. pt_error_return(regs, -err);
  1073. else
  1074. pt_succ_return(regs, 0);
  1075. break;
  1076. }
  1077. default: {
  1078. int err = ptrace_request(child, request, addr, data);
  1079. if (err)
  1080. pt_error_return(regs, -err);
  1081. else
  1082. pt_succ_return(regs, 0);
  1083. goto out_tsk;
  1084. }
  1085. }
  1086. out_tsk:
  1087. if (child)
  1088. put_task_struct(child);
  1089. out:
  1090. unlock_kernel();
  1091. }
  1092. asmlinkage void syscall_trace(struct pt_regs *regs, int syscall_exit_p)
  1093. {
  1094. /* do the secure computing check first */
  1095. secure_computing(regs->u_regs[UREG_G1]);
  1096. if (unlikely(current->audit_context) && syscall_exit_p) {
  1097. unsigned long tstate = regs->tstate;
  1098. int result = AUDITSC_SUCCESS;
  1099. if (unlikely(tstate & (TSTATE_XCARRY | TSTATE_ICARRY)))
  1100. result = AUDITSC_FAILURE;
  1101. audit_syscall_exit(result, regs->u_regs[UREG_I0]);
  1102. }
  1103. if (!(current->ptrace & PT_PTRACED))
  1104. goto out;
  1105. if (!test_thread_flag(TIF_SYSCALL_TRACE))
  1106. goto out;
  1107. ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
  1108. ? 0x80 : 0));
  1109. /*
  1110. * this isn't the same as continuing with a signal, but it will do
  1111. * for normal use. strace only continues with a signal if the
  1112. * stopping signal is not SIGTRAP. -brl
  1113. */
  1114. if (current->exit_code) {
  1115. send_sig(current->exit_code, current, 1);
  1116. current->exit_code = 0;
  1117. }
  1118. out:
  1119. if (unlikely(current->audit_context) && !syscall_exit_p)
  1120. audit_syscall_entry((test_thread_flag(TIF_32BIT) ?
  1121. AUDIT_ARCH_SPARC :
  1122. AUDIT_ARCH_SPARC64),
  1123. regs->u_regs[UREG_G1],
  1124. regs->u_regs[UREG_I0],
  1125. regs->u_regs[UREG_I1],
  1126. regs->u_regs[UREG_I2],
  1127. regs->u_regs[UREG_I3]);
  1128. }