/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Derived from "arch/m68k/kernel/ptrace.c"
 * Copyright (C) 1994 by Hamish Macdonald
 * Taken from linux/kernel/ptrace.c and modified for M680x0.
 * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
 *
 * Modified by Cort Dougan (cort@hq.fsmlabs.com)
 * and Paul Mackerras (paulus@samba.org).
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License.  See the file README.legal in the main directory of
 * this archive for more details.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/elf.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/seccomp.h>
#include <linux/audit.h>
#ifdef CONFIG_PPC32
#include <linux/module.h>
#endif

#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/system.h>

/*
 * This does not yet catch signals sent when the child dies;
 * that is handled in exit.c or in signal.c.
 */

/*
 * Set of msr bits that gdb can change on behalf of a process.
 */
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
#define MSR_DEBUGCHANGE	0
#else
#define MSR_DEBUGCHANGE	(MSR_SE | MSR_BE)
#endif

/*
 * Max register writeable via put_reg
 */
#ifdef CONFIG_PPC32
#define PT_MAX_PUT_REG	PT_MQ
#else
#define PT_MAX_PUT_REG	PT_CCR
#endif
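
/*
 * Note on the two helpers below: the MSR value reported to userspace has
 * the thread's floating-point exception mode (fpexc_mode) folded into it,
 * and writes to the MSR are restricted to the MSR_DEBUGCHANGE bits defined
 * above, so a tracer cannot flip arbitrary machine-state bits.
 */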
static unsigned long get_user_msr(struct task_struct *task)
{
        return task->thread.regs->msr | task->thread.fpexc_mode;
}

static int set_user_msr(struct task_struct *task, unsigned long msr)
{
        task->thread.regs->msr &= ~MSR_DEBUGCHANGE;
        task->thread.regs->msr |= msr & MSR_DEBUGCHANGE;
        return 0;
}

/*
 * We prevent mucking around with the reserved area of trap
 * which is used internally by the kernel.
 */
static int set_user_trap(struct task_struct *task, unsigned long trap)
{
        task->thread.regs->trap = trap & 0xfff0;
        return 0;
}

/*
 * Get contents of register REGNO in task TASK.
 */
unsigned long ptrace_get_reg(struct task_struct *task, int regno)
{
        if (task->thread.regs == NULL)
                return -EIO;

        if (regno == PT_MSR)
                return get_user_msr(task);

        if (regno < (sizeof(struct pt_regs) / sizeof(unsigned long)))
                return ((unsigned long *)task->thread.regs)[regno];

        return -EIO;
}

/*
 * Write contents of register REGNO in task TASK.
 */
int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data)
{
        if (task->thread.regs == NULL)
                return -EIO;

        if (regno == PT_MSR)
                return set_user_msr(task, data);
        if (regno == PT_TRAP)
                return set_user_trap(task, data);

        if (regno <= PT_MAX_PUT_REG) {
                ((unsigned long *)task->thread.regs)[regno] = data;
                return 0;
        }
        return -EIO;
}
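
/*
 * Regset get/set handlers.  gpr_get()/gpr_set() transfer the pt_regs image
 * between the traced task and a user_regset buffer; the MSR and trap words
 * are routed through the filtered helpers above rather than copied verbatim.
 */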
static int gpr_get(struct task_struct *target, const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   void *kbuf, void __user *ubuf)
{
        int ret;

        if (target->thread.regs == NULL)
                return -EIO;

        CHECK_FULL_REGS(target->thread.regs);

        ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                  target->thread.regs,
                                  0, offsetof(struct pt_regs, msr));
        if (!ret) {
                unsigned long msr = get_user_msr(target);
                ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
                                          offsetof(struct pt_regs, msr),
                                          offsetof(struct pt_regs, msr) +
                                          sizeof(msr));
        }

        BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
                     offsetof(struct pt_regs, msr) + sizeof(long));

        if (!ret)
                ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                          &target->thread.regs->orig_gpr3,
                                          offsetof(struct pt_regs, orig_gpr3),
                                          sizeof(struct pt_regs));
        if (!ret)
                ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
                                               sizeof(struct pt_regs), -1);

        return ret;
}

static int gpr_set(struct task_struct *target, const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   const void *kbuf, const void __user *ubuf)
{
        unsigned long reg;
        int ret;

        if (target->thread.regs == NULL)
                return -EIO;

        CHECK_FULL_REGS(target->thread.regs);

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                 target->thread.regs,
                                 0, PT_MSR * sizeof(reg));

        if (!ret && count > 0) {
                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
                                         PT_MSR * sizeof(reg),
                                         (PT_MSR + 1) * sizeof(reg));
                if (!ret)
                        ret = set_user_msr(target, reg);
        }

        BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
                     offsetof(struct pt_regs, msr) + sizeof(long));

        if (!ret)
                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                         &target->thread.regs->orig_gpr3,
                                         PT_ORIG_R3 * sizeof(reg),
                                         (PT_MAX_PUT_REG + 1) * sizeof(reg));

        if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
                ret = user_regset_copyin_ignore(
                        &pos, &count, &kbuf, &ubuf,
                        (PT_MAX_PUT_REG + 1) * sizeof(reg),
                        PT_TRAP * sizeof(reg));

        if (!ret && count > 0) {
                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
                                         PT_TRAP * sizeof(reg),
                                         (PT_TRAP + 1) * sizeof(reg));
                if (!ret)
                        ret = set_user_trap(target, reg);
        }

        if (!ret)
                ret = user_regset_copyin_ignore(
                        &pos, &count, &kbuf, &ubuf,
                        (PT_TRAP + 1) * sizeof(reg), -1);

        return ret;
}
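
/*
 * fpr_get()/fpr_set() handle the FP regset: 32 FPRs followed by the FPSCR.
 * When CONFIG_VSX is enabled the FP registers share storage with the low
 * halves of the VSX registers, so the transfer is staged through a local
 * buffer instead of copying thread.fpr directly.
 */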
static int fpr_get(struct task_struct *target, const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   void *kbuf, void __user *ubuf)
{
#ifdef CONFIG_VSX
        double buf[33];
        int i;
#endif
        flush_fp_to_thread(target);

#ifdef CONFIG_VSX
        /* copy to local buffer then write that out */
        for (i = 0; i < 32; i++)
                buf[i] = target->thread.TS_FPR(i);
        memcpy(&buf[32], &target->thread.fpscr, sizeof(double));
        return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
#else
        BUILD_BUG_ON(offsetof(struct thread_struct, fpscr) !=
                     offsetof(struct thread_struct, TS_FPR(32)));

        return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                   &target->thread.fpr, 0, -1);
#endif
}

static int fpr_set(struct task_struct *target, const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   const void *kbuf, const void __user *ubuf)
{
#ifdef CONFIG_VSX
        double buf[33];
        int i;
#endif
        flush_fp_to_thread(target);

#ifdef CONFIG_VSX
        /* copy to local buffer then write that out */
        i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
        if (i)
                return i;
        for (i = 0; i < 32; i++)
                target->thread.TS_FPR(i) = buf[i];
        memcpy(&target->thread.fpscr, &buf[32], sizeof(double));
        return 0;
#else
        BUILD_BUG_ON(offsetof(struct thread_struct, fpscr) !=
                     offsetof(struct thread_struct, TS_FPR(32)));

        return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                  &target->thread.fpr, 0, -1);
#endif
}

#ifdef CONFIG_ALTIVEC
/*
 * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
 * The transfer totals 34 quadwords.  Quadwords 0-31 contain the
 * corresponding vector registers.  Quadword 32 contains the vscr as the
 * last word (offset 12) within that quadword.  Quadword 33 contains the
 * vrsave as the first word (offset 0) within the quadword.
 *
 * This definition of the VMX state is compatible with the current PPC32
 * ptrace interface.  This allows signal handling and ptrace to use the
 * same structures.  This also simplifies the implementation of a
 * bi-arch (combined 32- and 64-bit) gdb.
 */
static int vr_active(struct task_struct *target,
                     const struct user_regset *regset)
{
        flush_altivec_to_thread(target);
        return target->thread.used_vr ? regset->n : 0;
}

static int vr_get(struct task_struct *target, const struct user_regset *regset,
                  unsigned int pos, unsigned int count,
                  void *kbuf, void __user *ubuf)
{
        int ret;

        flush_altivec_to_thread(target);

        BUILD_BUG_ON(offsetof(struct thread_struct, vscr) !=
                     offsetof(struct thread_struct, vr[32]));

        ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                  &target->thread.vr, 0,
                                  33 * sizeof(vector128));
        if (!ret) {
                /*
                 * Copy out only the low-order word of vrsave.
                 */
                union {
                        elf_vrreg_t reg;
                        u32 word;
                } vrsave;
                memset(&vrsave, 0, sizeof(vrsave));
                vrsave.word = target->thread.vrsave;
                ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
                                          33 * sizeof(vector128), -1);
        }

        return ret;
}

static int vr_set(struct task_struct *target, const struct user_regset *regset,
                  unsigned int pos, unsigned int count,
                  const void *kbuf, const void __user *ubuf)
{
        int ret;

        flush_altivec_to_thread(target);

        BUILD_BUG_ON(offsetof(struct thread_struct, vscr) !=
                     offsetof(struct thread_struct, vr[32]));

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                 &target->thread.vr, 0, 33 * sizeof(vector128));
        if (!ret && count > 0) {
                /*
                 * We use only the first word of vrsave.
                 */
                union {
                        elf_vrreg_t reg;
                        u32 word;
                } vrsave;
                memset(&vrsave, 0, sizeof(vrsave));
                vrsave.word = target->thread.vrsave;
                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
                                         33 * sizeof(vector128), -1);
                if (!ret)
                        target->thread.vrsave = vrsave.word;
        }

        return ret;
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
/*
 * Currently, to set and get all the vsx state, you need to call
 * the fp and VMX calls as well.  This only gets/sets the lower 32
 * 128-bit VSX registers.
 */
static int vsr_active(struct task_struct *target,
                      const struct user_regset *regset)
{
        flush_vsx_to_thread(target);
        return target->thread.used_vsr ? regset->n : 0;
}

static int vsr_get(struct task_struct *target, const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   void *kbuf, void __user *ubuf)
{
        double buf[32];
        int ret, i;

        flush_vsx_to_thread(target);

        for (i = 0; i < 32; i++)
                buf[i] = target->thread.fpr[i][TS_VSRLOWOFFSET];
        ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                  buf, 0, 32 * sizeof(double));

        return ret;
}

static int vsr_set(struct task_struct *target, const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   const void *kbuf, const void __user *ubuf)
{
        double buf[32];
        int ret, i;

        flush_vsx_to_thread(target);

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                 buf, 0, 32 * sizeof(double));
        for (i = 0; i < 32; i++)
                target->thread.fpr[i][TS_VSRLOWOFFSET] = buf[i];

        return ret;
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_SPE
/*
 * For get_evrregs/set_evrregs functions 'data' has the following layout:
 *
 * struct {
 *     u32 evr[32];
 *     u64 acc;
 *     u32 spefscr;
 * }
 */
static int evr_active(struct task_struct *target,
                      const struct user_regset *regset)
{
        flush_spe_to_thread(target);
        return target->thread.used_spe ? regset->n : 0;
}

static int evr_get(struct task_struct *target, const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   void *kbuf, void __user *ubuf)
{
        int ret;

        flush_spe_to_thread(target);

        ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                  &target->thread.evr,
                                  0, sizeof(target->thread.evr));

        BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
                     offsetof(struct thread_struct, spefscr));

        if (!ret)
                ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                          &target->thread.acc,
                                          sizeof(target->thread.evr), -1);

        return ret;
}

static int evr_set(struct task_struct *target, const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   const void *kbuf, const void __user *ubuf)
{
        int ret;

        flush_spe_to_thread(target);

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                 &target->thread.evr,
                                 0, sizeof(target->thread.evr));

        BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
                     offsetof(struct thread_struct, spefscr));

        if (!ret)
                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                         &target->thread.acc,
                                         sizeof(target->thread.evr), -1);

        return ret;
}
#endif /* CONFIG_SPE */

/*
 * These are our native regset flavors.
 */
enum powerpc_regset {
        REGSET_GPR,
        REGSET_FPR,
#ifdef CONFIG_ALTIVEC
        REGSET_VMX,
#endif
#ifdef CONFIG_VSX
        REGSET_VSX,
#endif
#ifdef CONFIG_SPE
        REGSET_SPE,
#endif
};

static const struct user_regset native_regsets[] = {
        [REGSET_GPR] = {
                .core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
                .size = sizeof(long), .align = sizeof(long),
                .get = gpr_get, .set = gpr_set
        },
        [REGSET_FPR] = {
                .core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
                .size = sizeof(double), .align = sizeof(double),
                .get = fpr_get, .set = fpr_set
        },
#ifdef CONFIG_ALTIVEC
        [REGSET_VMX] = {
                .core_note_type = NT_PPC_VMX, .n = 34,
                .size = sizeof(vector128), .align = sizeof(vector128),
                .active = vr_active, .get = vr_get, .set = vr_set
        },
#endif
#ifdef CONFIG_VSX
        [REGSET_VSX] = {
                .core_note_type = NT_PPC_VSX, .n = 32,
                .size = sizeof(double), .align = sizeof(double),
                .active = vsr_active, .get = vsr_get, .set = vsr_set
        },
#endif
#ifdef CONFIG_SPE
        [REGSET_SPE] = {
                .n = 35,
                .size = sizeof(u32), .align = sizeof(u32),
                .active = evr_active, .get = evr_get, .set = evr_set
        },
#endif
};

static const struct user_regset_view user_ppc_native_view = {
        .name = UTS_MACHINE, .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
        .regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
};
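
/*
 * On 64-bit kernels a 32-bit tracee is described by the compat view below.
 * gpr32_get()/gpr32_set() narrow each 64-bit GPR to a compat_ulong_t on the
 * way out and widen it again on the way in, with the same special-casing of
 * the MSR and trap words as the native code.
 */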
#ifdef CONFIG_PPC64
#include <linux/compat.h>

static int gpr32_get(struct task_struct *target,
                     const struct user_regset *regset,
                     unsigned int pos, unsigned int count,
                     void *kbuf, void __user *ubuf)
{
        const unsigned long *regs = &target->thread.regs->gpr[0];
        compat_ulong_t *k = kbuf;
        compat_ulong_t __user *u = ubuf;
        compat_ulong_t reg;

        if (target->thread.regs == NULL)
                return -EIO;

        CHECK_FULL_REGS(target->thread.regs);

        pos /= sizeof(reg);
        count /= sizeof(reg);

        if (kbuf)
                for (; count > 0 && pos < PT_MSR; --count)
                        *k++ = regs[pos++];
        else
                for (; count > 0 && pos < PT_MSR; --count)
                        if (__put_user((compat_ulong_t) regs[pos++], u++))
                                return -EFAULT;

        if (count > 0 && pos == PT_MSR) {
                reg = get_user_msr(target);
                if (kbuf)
                        *k++ = reg;
                else if (__put_user(reg, u++))
                        return -EFAULT;
                ++pos;
                --count;
        }

        if (kbuf)
                for (; count > 0 && pos < PT_REGS_COUNT; --count)
                        *k++ = regs[pos++];
        else
                for (; count > 0 && pos < PT_REGS_COUNT; --count)
                        if (__put_user((compat_ulong_t) regs[pos++], u++))
                                return -EFAULT;

        kbuf = k;
        ubuf = u;
        pos *= sizeof(reg);
        count *= sizeof(reg);
        return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
                                        PT_REGS_COUNT * sizeof(reg), -1);
}

static int gpr32_set(struct task_struct *target,
                     const struct user_regset *regset,
                     unsigned int pos, unsigned int count,
                     const void *kbuf, const void __user *ubuf)
{
        unsigned long *regs = &target->thread.regs->gpr[0];
        const compat_ulong_t *k = kbuf;
        const compat_ulong_t __user *u = ubuf;
        compat_ulong_t reg;

        if (target->thread.regs == NULL)
                return -EIO;

        CHECK_FULL_REGS(target->thread.regs);

        pos /= sizeof(reg);
        count /= sizeof(reg);

        if (kbuf)
                for (; count > 0 && pos < PT_MSR; --count)
                        regs[pos++] = *k++;
        else
                for (; count > 0 && pos < PT_MSR; --count) {
                        if (__get_user(reg, u++))
                                return -EFAULT;
                        regs[pos++] = reg;
                }

        if (count > 0 && pos == PT_MSR) {
                if (kbuf)
                        reg = *k++;
                else if (__get_user(reg, u++))
                        return -EFAULT;
                set_user_msr(target, reg);
                ++pos;
                --count;
        }

        if (kbuf) {
                for (; count > 0 && pos <= PT_MAX_PUT_REG; --count)
                        regs[pos++] = *k++;
                for (; count > 0 && pos < PT_TRAP; --count, ++pos)
                        ++k;
        } else {
                for (; count > 0 && pos <= PT_MAX_PUT_REG; --count) {
                        if (__get_user(reg, u++))
                                return -EFAULT;
                        regs[pos++] = reg;
                }
                for (; count > 0 && pos < PT_TRAP; --count, ++pos)
                        if (__get_user(reg, u++))
                                return -EFAULT;
        }

        if (count > 0 && pos == PT_TRAP) {
                if (kbuf)
                        reg = *k++;
                else if (__get_user(reg, u++))
                        return -EFAULT;
                set_user_trap(target, reg);
                ++pos;
                --count;
        }

        kbuf = k;
        ubuf = u;
        pos *= sizeof(reg);
        count *= sizeof(reg);
        return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
                                         (PT_TRAP + 1) * sizeof(reg), -1);
}

/*
 * These are the regset flavors matching the CONFIG_PPC32 native set.
 */
static const struct user_regset compat_regsets[] = {
        [REGSET_GPR] = {
                .core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
                .size = sizeof(compat_long_t), .align = sizeof(compat_long_t),
                .get = gpr32_get, .set = gpr32_set
        },
        [REGSET_FPR] = {
                .core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
                .size = sizeof(double), .align = sizeof(double),
                .get = fpr_get, .set = fpr_set
        },
#ifdef CONFIG_ALTIVEC
        [REGSET_VMX] = {
                .core_note_type = NT_PPC_VMX, .n = 34,
                .size = sizeof(vector128), .align = sizeof(vector128),
                .active = vr_active, .get = vr_get, .set = vr_set
        },
#endif
#ifdef CONFIG_SPE
        [REGSET_SPE] = {
                .core_note_type = NT_PPC_SPE, .n = 35,
                .size = sizeof(u32), .align = sizeof(u32),
                .active = evr_active, .get = evr_get, .set = evr_set
        },
#endif
};

static const struct user_regset_view user_ppc_compat_view = {
        .name = "ppc", .e_machine = EM_PPC, .ei_osabi = ELF_OSABI,
        .regsets = compat_regsets, .n = ARRAY_SIZE(compat_regsets)
};
#endif /* CONFIG_PPC64 */

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_PPC64
        if (test_tsk_thread_flag(task, TIF_32BIT))
                return &user_ppc_compat_view;
#endif
        return &user_ppc_native_view;
}
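
/*
 * Single-stepping is armed differently depending on the core: 40x and
 * BookE parts use the debug control register (DBCR0_IC plus MSR_DE),
 * while classic processors set MSR_SE in the thread's saved MSR.
 */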
void user_enable_single_step(struct task_struct *task)
{
        struct pt_regs *regs = task->thread.regs;

        if (regs != NULL) {
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
                task->thread.dbcr0 = DBCR0_IDM | DBCR0_IC;
                regs->msr |= MSR_DE;
#else
                regs->msr |= MSR_SE;
#endif
        }
        set_tsk_thread_flag(task, TIF_SINGLESTEP);
}

void user_disable_single_step(struct task_struct *task)
{
        struct pt_regs *regs = task->thread.regs;

        if (regs != NULL) {
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
                task->thread.dbcr0 = 0;
                regs->msr &= ~MSR_DE;
#else
                regs->msr &= ~MSR_SE;
#endif
        }
        clear_tsk_thread_flag(task, TIF_SINGLESTEP);
}
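
/*
 * Validate and install a data address breakpoint (DABR).  Only debug
 * register 0 is supported, the low three bits of the value are flag
 * bits, and the translation-enable flag must be set for any non-zero
 * breakpoint address.
 */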
static int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
                               unsigned long data)
{
        /* We only support one DABR and no IABRS at the moment */
        if (addr > 0)
                return -EINVAL;

        /* The bottom 3 bits are flags */
        if ((data & ~0x7UL) >= TASK_SIZE)
                return -EIO;

        /* Ensure translation is on */
        if (data && !(data & DABR_TRANSLATION))
                return -EIO;

        task->thread.dabr = data;
        return 0;
}

/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
        /* make sure the single step bit is not set. */
        user_disable_single_step(child);
}

/*
 * Here are the old "legacy" powerpc specific getregs/setregs ptrace calls;
 * we mark them as obsolete now, and they will be removed in a future version.
 */
static long arch_ptrace_old(struct task_struct *child, long request, long addr,
                            long data)
{
        switch (request) {
        case PPC_PTRACE_GETREGS:        /* Get GPRs 0 - 31. */
                return copy_regset_to_user(child, &user_ppc_native_view,
                                           REGSET_GPR, 0, 32 * sizeof(long),
                                           (void __user *) data);

        case PPC_PTRACE_SETREGS:        /* Set GPRs 0 - 31. */
                return copy_regset_from_user(child, &user_ppc_native_view,
                                             REGSET_GPR, 0, 32 * sizeof(long),
                                             (const void __user *) data);

        case PPC_PTRACE_GETFPREGS:      /* Get FPRs 0 - 31. */
                return copy_regset_to_user(child, &user_ppc_native_view,
                                           REGSET_FPR, 0, 32 * sizeof(double),
                                           (void __user *) data);

        case PPC_PTRACE_SETFPREGS:      /* Set FPRs 0 - 31. */
                return copy_regset_from_user(child, &user_ppc_native_view,
                                             REGSET_FPR, 0, 32 * sizeof(double),
                                             (const void __user *) data);
        }

        return -EPERM;
}
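
/*
 * arch_ptrace() is the powerpc entry point for ptrace requests.  It handles
 * the architecture-specific requests (USER-area peeks/pokes, the DABR, and
 * the register-set transfers) and hands anything else to the generic
 * ptrace_request().
 */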
long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
        int ret = -EPERM;

        switch (request) {
        /* read the word at location addr in the USER area. */
        case PTRACE_PEEKUSR: {
                unsigned long index, tmp;

                ret = -EIO;
                /* convert to index and check */
#ifdef CONFIG_PPC32
                index = (unsigned long) addr >> 2;
                if ((addr & 3) || (index > PT_FPSCR)
                    || (child->thread.regs == NULL))
#else
                index = (unsigned long) addr >> 3;
                if ((addr & 7) || (index > PT_FPSCR))
#endif
                        break;

                CHECK_FULL_REGS(child->thread.regs);
                if (index < PT_FPR0) {
                        tmp = ptrace_get_reg(child, (int) index);
                } else {
                        flush_fp_to_thread(child);
                        tmp = ((unsigned long *)child->thread.fpr)
                                [TS_FPRWIDTH * (index - PT_FPR0)];
                }
                ret = put_user(tmp, (unsigned long __user *) data);
                break;
        }

        /* write the word at location addr in the USER area */
        case PTRACE_POKEUSR: {
                unsigned long index;

                ret = -EIO;
                /* convert to index and check */
#ifdef CONFIG_PPC32
                index = (unsigned long) addr >> 2;
                if ((addr & 3) || (index > PT_FPSCR)
                    || (child->thread.regs == NULL))
#else
                index = (unsigned long) addr >> 3;
                if ((addr & 7) || (index > PT_FPSCR))
#endif
                        break;

                CHECK_FULL_REGS(child->thread.regs);
                if (index < PT_FPR0) {
                        ret = ptrace_put_reg(child, index, data);
                } else {
                        flush_fp_to_thread(child);
                        ((unsigned long *)child->thread.fpr)
                                [TS_FPRWIDTH * (index - PT_FPR0)] = data;
                        ret = 0;
                }
                break;
        }

        case PTRACE_GET_DEBUGREG: {
                ret = -EINVAL;
                /* We only support one DABR and no IABRS at the moment */
                if (addr > 0)
                        break;
                ret = put_user(child->thread.dabr,
                               (unsigned long __user *)data);
                break;
        }

        case PTRACE_SET_DEBUGREG:
                ret = ptrace_set_debugreg(child, addr, data);
                break;

#ifdef CONFIG_PPC64
        case PTRACE_GETREGS64:
#endif
        case PTRACE_GETREGS:    /* Get all pt_regs from the child. */
                return copy_regset_to_user(child, &user_ppc_native_view,
                                           REGSET_GPR,
                                           0, sizeof(struct pt_regs),
                                           (void __user *) data);

#ifdef CONFIG_PPC64
        case PTRACE_SETREGS64:
#endif
        case PTRACE_SETREGS:    /* Set all gp regs in the child. */
                return copy_regset_from_user(child, &user_ppc_native_view,
                                             REGSET_GPR,
                                             0, sizeof(struct pt_regs),
                                             (const void __user *) data);

        case PTRACE_GETFPREGS: /* Get the child FPU state (FPR0...31 + FPSCR) */
                return copy_regset_to_user(child, &user_ppc_native_view,
                                           REGSET_FPR,
                                           0, sizeof(elf_fpregset_t),
                                           (void __user *) data);

        case PTRACE_SETFPREGS: /* Set the child FPU state (FPR0...31 + FPSCR) */
                return copy_regset_from_user(child, &user_ppc_native_view,
                                             REGSET_FPR,
                                             0, sizeof(elf_fpregset_t),
                                             (const void __user *) data);

#ifdef CONFIG_ALTIVEC
        case PTRACE_GETVRREGS:
                return copy_regset_to_user(child, &user_ppc_native_view,
                                           REGSET_VMX,
                                           0, (33 * sizeof(vector128) +
                                               sizeof(u32)),
                                           (void __user *) data);

        case PTRACE_SETVRREGS:
                return copy_regset_from_user(child, &user_ppc_native_view,
                                             REGSET_VMX,
                                             0, (33 * sizeof(vector128) +
                                                 sizeof(u32)),
                                             (const void __user *) data);
#endif
#ifdef CONFIG_VSX
        case PTRACE_GETVSRREGS:
                return copy_regset_to_user(child, &user_ppc_native_view,
                                           REGSET_VSX,
                                           0, (32 * sizeof(vector128) +
                                               sizeof(u32)),
                                           (void __user *) data);

        case PTRACE_SETVSRREGS:
                return copy_regset_from_user(child, &user_ppc_native_view,
                                             REGSET_VSX,
                                             0, (32 * sizeof(vector128) +
                                                 sizeof(u32)),
                                             (const void __user *) data);
#endif
#ifdef CONFIG_SPE
        case PTRACE_GETEVRREGS:
                /* Get the child spe register state. */
                return copy_regset_to_user(child, &user_ppc_native_view,
                                           REGSET_SPE, 0, 35 * sizeof(u32),
                                           (void __user *) data);

        case PTRACE_SETEVRREGS:
                /* Set the child spe register state. */
                return copy_regset_from_user(child, &user_ppc_native_view,
                                             REGSET_SPE, 0, 35 * sizeof(u32),
                                             (const void __user *) data);
#endif

        /* Old reverse args ptrace calls */
        case PPC_PTRACE_GETREGS:        /* Get GPRs 0 - 31. */
        case PPC_PTRACE_SETREGS:        /* Set GPRs 0 - 31. */
        case PPC_PTRACE_GETFPREGS:      /* Get FPRs 0 - 31. */
        case PPC_PTRACE_SETFPREGS:      /* Set FPRs 0 - 31. */
                ret = arch_ptrace_old(child, request, addr, data);
                break;

        default:
                ret = ptrace_request(child, request, addr, data);
                break;
        }
        return ret;
}
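
/*
 * Syscall tracing support: do_syscall_trace() reports a syscall stop to
 * the tracing parent.  The enter/leave hooks below are run around each
 * system call when syscall tracing, single-stepping or auditing is in
 * effect.
 */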
static void do_syscall_trace(void)
{
        /*
         * the 0x80 provides a way for the tracing parent to distinguish
         * between a syscall stop and SIGTRAP delivery
         */
        ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
                                 ? 0x80 : 0));

        /*
         * this isn't the same as continuing with a signal, but it will do
         * for normal use.  strace only continues with a signal if the
         * stopping signal is not SIGTRAP.  -brl
         */
        if (current->exit_code) {
                send_sig(current->exit_code, current, 1);
                current->exit_code = 0;
        }
}
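
/*
 * Called on syscall entry: run seccomp checks, report the stop to a
 * tracer if syscall tracing is enabled, and log the syscall arguments
 * to the audit subsystem (truncating them to 32 bits for 32-bit tasks
 * on a 64-bit kernel).
 */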
void do_syscall_trace_enter(struct pt_regs *regs)
{
        secure_computing(regs->gpr[0]);

        if (test_thread_flag(TIF_SYSCALL_TRACE)
            && (current->ptrace & PT_PTRACED))
                do_syscall_trace();

        if (unlikely(current->audit_context)) {
#ifdef CONFIG_PPC64
                if (!test_thread_flag(TIF_32BIT))
                        audit_syscall_entry(AUDIT_ARCH_PPC64,
                                            regs->gpr[0],
                                            regs->gpr[3], regs->gpr[4],
                                            regs->gpr[5], regs->gpr[6]);
                else
#endif
                        audit_syscall_entry(AUDIT_ARCH_PPC,
                                            regs->gpr[0],
                                            regs->gpr[3] & 0xffffffff,
                                            regs->gpr[4] & 0xffffffff,
                                            regs->gpr[5] & 0xffffffff,
                                            regs->gpr[6] & 0xffffffff);
        }
}

void do_syscall_trace_leave(struct pt_regs *regs)
{
        if (unlikely(current->audit_context))
                audit_syscall_exit((regs->ccr & 0x10000000) ?
                                   AUDITSC_FAILURE : AUDITSC_SUCCESS,
                                   regs->result);

        if ((test_thread_flag(TIF_SYSCALL_TRACE)
             || test_thread_flag(TIF_SINGLESTEP))
            && (current->ptrace & PT_PTRACED))
                do_syscall_trace();
}