ptrace.c

/*
 * PowerPC version
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Derived from "arch/m68k/kernel/ptrace.c"
 * Copyright (C) 1994 by Hamish Macdonald
 * Taken from linux/kernel/ptrace.c and modified for M680x0.
 * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
 *
 * Modified by Cort Dougan (cort@hq.fsmlabs.com)
 * and Paul Mackerras (paulus@samba.org).
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License.  See the file README.legal in the main directory of
 * this archive for more details.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/elf.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/seccomp.h>
#include <linux/audit.h>
#ifdef CONFIG_PPC32
#include <linux/module.h>
#endif

#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/system.h>

/*
 * does not yet catch signals sent when the child dies.
 * in exit.c or in signal.c.
 */

/*
 * Set of msr bits that gdb can change on behalf of a process.
 */
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
#define MSR_DEBUGCHANGE 0
#else
#define MSR_DEBUGCHANGE (MSR_SE | MSR_BE)
#endif

/*
 * Max register writeable via put_reg
 */
#ifdef CONFIG_PPC32
#define PT_MAX_PUT_REG  PT_MQ
#else
#define PT_MAX_PUT_REG  PT_CCR
#endif
static unsigned long get_user_msr(struct task_struct *task)
{
        return task->thread.regs->msr | task->thread.fpexc_mode;
}

static int set_user_msr(struct task_struct *task, unsigned long msr)
{
        task->thread.regs->msr &= ~MSR_DEBUGCHANGE;
        task->thread.regs->msr |= msr & MSR_DEBUGCHANGE;
        return 0;
}

/*
 * We prevent mucking around with the reserved area of trap
 * which is used internally by the kernel.
 */
static int set_user_trap(struct task_struct *task, unsigned long trap)
{
        task->thread.regs->trap = trap & 0xfff0;
        return 0;
}

/*
 * Get contents of register REGNO in task TASK.
 */
unsigned long ptrace_get_reg(struct task_struct *task, int regno)
{
        if (task->thread.regs == NULL)
                return -EIO;

        if (regno == PT_MSR)
                return get_user_msr(task);

        if (regno < (sizeof(struct pt_regs) / sizeof(unsigned long)))
                return ((unsigned long *)task->thread.regs)[regno];

        return -EIO;
}

/*
 * Write contents of register REGNO in task TASK.
 */
int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data)
{
        if (task->thread.regs == NULL)
                return -EIO;

        if (regno == PT_MSR)
                return set_user_msr(task, data);
        if (regno == PT_TRAP)
                return set_user_trap(task, data);

        if (regno <= PT_MAX_PUT_REG) {
                ((unsigned long *)task->thread.regs)[regno] = data;
                return 0;
        }
        return -EIO;
}
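
/*
 * Illustrative sketch (tracer side, not built here): ptrace_get_reg() and
 * ptrace_put_reg() back the PTRACE_PEEKUSR/PTRACE_POKEUSR "USER area"
 * accesses handled in arch_ptrace() below.  Offsets are the PT_* indices
 * from asm/ptrace.h scaled by sizeof(long); error handling omitted and
 * "child" is assumed to be a stopped tracee.
 *
 *      long r1 = ptrace(PTRACE_PEEKUSR, child, PT_R1 * sizeof(long), 0);
 *      ptrace(PTRACE_POKEUSR, child, PT_R3 * sizeof(long), new_r3);
 */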

static int gpr_get(struct task_struct *target, const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   void *kbuf, void __user *ubuf)
{
        int ret;

        if (target->thread.regs == NULL)
                return -EIO;

        CHECK_FULL_REGS(target->thread.regs);

        ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                  target->thread.regs,
                                  0, offsetof(struct pt_regs, msr));
        if (!ret) {
                unsigned long msr = get_user_msr(target);
                ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
                                          offsetof(struct pt_regs, msr),
                                          offsetof(struct pt_regs, msr) +
                                          sizeof(msr));
        }

        BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
                     offsetof(struct pt_regs, msr) + sizeof(long));

        if (!ret)
                ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                          &target->thread.regs->orig_gpr3,
                                          offsetof(struct pt_regs, orig_gpr3),
                                          sizeof(struct pt_regs));
        if (!ret)
                ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
                                               sizeof(struct pt_regs), -1);

        return ret;
}

static int gpr_set(struct task_struct *target, const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   const void *kbuf, const void __user *ubuf)
{
        unsigned long reg;
        int ret;

        if (target->thread.regs == NULL)
                return -EIO;

        CHECK_FULL_REGS(target->thread.regs);

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                 target->thread.regs,
                                 0, PT_MSR * sizeof(reg));

        if (!ret && count > 0) {
                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
                                         PT_MSR * sizeof(reg),
                                         (PT_MSR + 1) * sizeof(reg));
                if (!ret)
                        ret = set_user_msr(target, reg);
        }

        BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
                     offsetof(struct pt_regs, msr) + sizeof(long));

        if (!ret)
                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                         &target->thread.regs->orig_gpr3,
                                         PT_ORIG_R3 * sizeof(reg),
                                         (PT_MAX_PUT_REG + 1) * sizeof(reg));

        if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
                ret = user_regset_copyin_ignore(
                        &pos, &count, &kbuf, &ubuf,
                        (PT_MAX_PUT_REG + 1) * sizeof(reg),
                        PT_TRAP * sizeof(reg));

        if (!ret && count > 0) {
                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
                                         PT_TRAP * sizeof(reg),
                                         (PT_TRAP + 1) * sizeof(reg));
                if (!ret)
                        ret = set_user_trap(target, reg);
        }

        if (!ret)
                ret = user_regset_copyin_ignore(
                        &pos, &count, &kbuf, &ubuf,
                        (PT_TRAP + 1) * sizeof(reg), -1);

        return ret;
}

static int fpr_get(struct task_struct *target, const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   void *kbuf, void __user *ubuf)
{
        flush_fp_to_thread(target);

        BUILD_BUG_ON(offsetof(struct thread_struct, fpscr) !=
                     offsetof(struct thread_struct, fpr[32]));

        return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                   &target->thread.fpr, 0, -1);
}

static int fpr_set(struct task_struct *target, const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   const void *kbuf, const void __user *ubuf)
{
        flush_fp_to_thread(target);

        BUILD_BUG_ON(offsetof(struct thread_struct, fpscr) !=
                     offsetof(struct thread_struct, fpr[32]));

        return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                  &target->thread.fpr, 0, -1);
}

static int get_fpregs(void __user *data, struct task_struct *task,
                      int has_fpscr)
{
        unsigned int count = has_fpscr ? 33 : 32;

        if (!access_ok(VERIFY_WRITE, data, count * sizeof(double)))
                return -EFAULT;

        return fpr_get(task, NULL, 0, count * sizeof(double), NULL, data);
}

static int set_fpregs(void __user *data, struct task_struct *task,
                      int has_fpscr)
{
        unsigned int count = has_fpscr ? 33 : 32;

        if (!access_ok(VERIFY_READ, data, count * sizeof(double)))
                return -EFAULT;

        return fpr_set(task, NULL, 0, count * sizeof(double), NULL, data);
}
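
/*
 * Illustrative note on the user-visible FP block handled above:
 *
 *      fpr[0..31]      -- 32 doubles
 *      fpscr           -- one more double-sized slot when has_fpscr is set
 *
 * so PTRACE_GETFPREGS/SETFPREGS transfer 33 * sizeof(double) bytes, while
 * the legacy PPC_PTRACE_GETFPREGS/SETFPREGS calls transfer only the 32
 * registers.  The BUILD_BUG_ON()s above rely on fpscr immediately
 * following fpr[31] in struct thread_struct.
 */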

#ifdef CONFIG_ALTIVEC
/*
 * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
 * The transfer totals 34 quadwords.  Quadwords 0-31 contain the
 * corresponding vector registers.  Quadword 32 contains the vscr as the
 * last word (offset 12) within that quadword.  Quadword 33 contains the
 * vrsave as the first word (offset 0) within the quadword.
 *
 * This definition of the VMX state is compatible with the current PPC32
 * ptrace interface.  This allows signal handling and ptrace to use the
 * same structures.  This also simplifies the implementation of a bi-arch
 * (combined 32- and 64-bit) gdb.
 */
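
/*
 * Illustrative sketch (not a kernel or libc definition): the 34-quadword
 * regset layout described above, as a tracer might model it.  The struct
 * and field names are hypothetical.
 *
 *      struct ppc_vmx_regset {
 *              unsigned char vr[32][16];       -- quadwords 0-31: vr0..vr31
 *              unsigned char vscr_pad[12];
 *              unsigned int  vscr;             -- last word of quadword 32
 *              unsigned int  vrsave;           -- first word of quadword 33
 *              unsigned char vrsave_pad[12];
 *      };
 *
 * Total size: 34 * 16 = 544 bytes, matching .n = 34, .size = sizeof(vector128)
 * in the REGSET_VMX entry below.
 */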

static int vr_active(struct task_struct *target,
                     const struct user_regset *regset)
{
        flush_altivec_to_thread(target);
        return target->thread.used_vr ? regset->n : 0;
}

static int vr_get(struct task_struct *target, const struct user_regset *regset,
                  unsigned int pos, unsigned int count,
                  void *kbuf, void __user *ubuf)
{
        int ret;

        flush_altivec_to_thread(target);

        BUILD_BUG_ON(offsetof(struct thread_struct, vscr) !=
                     offsetof(struct thread_struct, vr[32]));

        ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                  &target->thread.vr, 0,
                                  33 * sizeof(vector128));
        if (!ret) {
                /*
                 * Copy out only the low-order word of vrsave.
                 */
                union {
                        elf_vrreg_t reg;
                        u32 word;
                } vrsave;
                memset(&vrsave, 0, sizeof(vrsave));
                vrsave.word = target->thread.vrsave;
                ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
                                          33 * sizeof(vector128), -1);
        }

        return ret;
}

static int vr_set(struct task_struct *target, const struct user_regset *regset,
                  unsigned int pos, unsigned int count,
                  const void *kbuf, const void __user *ubuf)
{
        int ret;

        flush_altivec_to_thread(target);

        BUILD_BUG_ON(offsetof(struct thread_struct, vscr) !=
                     offsetof(struct thread_struct, vr[32]));

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                 &target->thread.vr, 0, 33 * sizeof(vector128));
        if (!ret && count > 0) {
                /*
                 * We use only the first word of vrsave.
                 */
                union {
                        elf_vrreg_t reg;
                        u32 word;
                } vrsave;
                memset(&vrsave, 0, sizeof(vrsave));
                vrsave.word = target->thread.vrsave;
                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
                                         33 * sizeof(vector128), -1);
                if (!ret)
                        target->thread.vrsave = vrsave.word;
        }

        return ret;
}

/*
 * Get contents of AltiVec register state in task TASK.
 */
static int get_vrregs(unsigned long __user *data, struct task_struct *task)
{
        if (!access_ok(VERIFY_WRITE, data,
                       33 * sizeof(vector128) + sizeof(u32)))
                return -EFAULT;

        return vr_get(task, NULL, 0, 33 * sizeof(vector128) + sizeof(u32),
                      NULL, data);
}

/*
 * Write contents of AltiVec register state into task TASK.
 */
static int set_vrregs(struct task_struct *task, unsigned long __user *data)
{
        if (!access_ok(VERIFY_READ, data, 33 * sizeof(vector128) + sizeof(u32)))
                return -EFAULT;

        return vr_set(task, NULL, 0, 33 * sizeof(vector128) + sizeof(u32),
                      NULL, data);
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_SPE
/*
 * For get_evrregs/set_evrregs functions 'data' has the following layout:
 *
 * struct {
 *      u32 evr[32];
 *      u64 acc;
 *      u32 spefscr;
 * }
 */

static int evr_active(struct task_struct *target,
                      const struct user_regset *regset)
{
        flush_spe_to_thread(target);
        return target->thread.used_spe ? regset->n : 0;
}

static int evr_get(struct task_struct *target, const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   void *kbuf, void __user *ubuf)
{
        int ret;

        flush_spe_to_thread(target);

        ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                  &target->thread.evr,
                                  0, sizeof(target->thread.evr));

        BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
                     offsetof(struct thread_struct, spefscr));

        if (!ret)
                ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                          &target->thread.acc,
                                          sizeof(target->thread.evr), -1);

        return ret;
}

static int evr_set(struct task_struct *target, const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   const void *kbuf, const void __user *ubuf)
{
        int ret;

        flush_spe_to_thread(target);

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                 &target->thread.evr,
                                 0, sizeof(target->thread.evr));

        BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
                     offsetof(struct thread_struct, spefscr));

        if (!ret)
                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                         &target->thread.acc,
                                         sizeof(target->thread.evr), -1);

        return ret;
}

/*
 * Get contents of SPE register state in task TASK.
 */
static int get_evrregs(unsigned long __user *data, struct task_struct *task)
{
        if (!access_ok(VERIFY_WRITE, data, 35 * sizeof(u32)))
                return -EFAULT;

        return evr_get(task, NULL, 0, 35 * sizeof(u32), NULL, data);
}

/*
 * Write contents of SPE register state into task TASK.
 */
static int set_evrregs(struct task_struct *task, unsigned long *data)
{
        if (!access_ok(VERIFY_READ, data, 35 * sizeof(u32)))
                return -EFAULT;

        return evr_set(task, NULL, 0, 35 * sizeof(u32), NULL, data);
}
#endif /* CONFIG_SPE */
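
/*
 * Illustrative note: the SPE 'data' layout above is 32 u32 evr words, one
 * u64 accumulator and one u32 spefscr, i.e. 32 + 2 + 1 = 35 32-bit words.
 * That is exactly the 35 * sizeof(u32) checked by get_evrregs() and
 * set_evrregs(), and the .n = 35, .size = sizeof(u32) of the REGSET_SPE
 * entry in native_regsets[] below.
 */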

/*
 * These are our native regset flavors.
 */
enum powerpc_regset {
        REGSET_GPR,
        REGSET_FPR,
#ifdef CONFIG_ALTIVEC
        REGSET_VMX,
#endif
#ifdef CONFIG_SPE
        REGSET_SPE,
#endif
};

static const struct user_regset native_regsets[] = {
        [REGSET_GPR] = {
                .core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
                .size = sizeof(long), .align = sizeof(long),
                .get = gpr_get, .set = gpr_set
        },
        [REGSET_FPR] = {
                .core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
                .size = sizeof(double), .align = sizeof(double),
                .get = fpr_get, .set = fpr_set
        },
#ifdef CONFIG_ALTIVEC
        [REGSET_VMX] = {
                .core_note_type = NT_PPC_VMX, .n = 34,
                .size = sizeof(vector128), .align = sizeof(vector128),
                .active = vr_active, .get = vr_get, .set = vr_set
        },
#endif
#ifdef CONFIG_SPE
        [REGSET_SPE] = {
                .n = 35,
                .size = sizeof(u32), .align = sizeof(u32),
                .active = evr_active, .get = evr_get, .set = evr_set
        },
#endif
};

static const struct user_regset_view user_ppc_native_view = {
        .name = UTS_MACHINE, .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
        .regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
};

#ifdef CONFIG_PPC64
#include <linux/compat.h>

static int gpr32_get(struct task_struct *target,
                     const struct user_regset *regset,
                     unsigned int pos, unsigned int count,
                     void *kbuf, void __user *ubuf)
{
        const unsigned long *regs = &target->thread.regs->gpr[0];
        compat_ulong_t *k = kbuf;
        compat_ulong_t __user *u = ubuf;
        compat_ulong_t reg;

        if (target->thread.regs == NULL)
                return -EIO;

        CHECK_FULL_REGS(target->thread.regs);

        pos /= sizeof(reg);
        count /= sizeof(reg);

        if (kbuf)
                for (; count > 0 && pos < PT_MSR; --count)
                        *k++ = regs[pos++];
        else
                for (; count > 0 && pos < PT_MSR; --count)
                        if (__put_user((compat_ulong_t) regs[pos++], u++))
                                return -EFAULT;

        if (count > 0 && pos == PT_MSR) {
                reg = get_user_msr(target);
                if (kbuf)
                        *k++ = reg;
                else if (__put_user(reg, u++))
                        return -EFAULT;
                ++pos;
                --count;
        }

        if (kbuf)
                for (; count > 0 && pos < PT_REGS_COUNT; --count)
                        *k++ = regs[pos++];
        else
                for (; count > 0 && pos < PT_REGS_COUNT; --count)
                        if (__put_user((compat_ulong_t) regs[pos++], u++))
                                return -EFAULT;

        kbuf = k;
        ubuf = u;
        pos *= sizeof(reg);
        count *= sizeof(reg);
        return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
                                        PT_REGS_COUNT * sizeof(reg), -1);
}

static int gpr32_set(struct task_struct *target,
                     const struct user_regset *regset,
                     unsigned int pos, unsigned int count,
                     const void *kbuf, const void __user *ubuf)
{
        unsigned long *regs = &target->thread.regs->gpr[0];
        const compat_ulong_t *k = kbuf;
        const compat_ulong_t __user *u = ubuf;
        compat_ulong_t reg;

        if (target->thread.regs == NULL)
                return -EIO;

        CHECK_FULL_REGS(target->thread.regs);

        pos /= sizeof(reg);
        count /= sizeof(reg);

        if (kbuf)
                for (; count > 0 && pos < PT_MSR; --count)
                        regs[pos++] = *k++;
        else
                for (; count > 0 && pos < PT_MSR; --count) {
                        if (__get_user(reg, u++))
                                return -EFAULT;
                        regs[pos++] = reg;
                }

        if (count > 0 && pos == PT_MSR) {
                if (kbuf)
                        reg = *k++;
                else if (__get_user(reg, u++))
                        return -EFAULT;
                set_user_msr(target, reg);
                ++pos;
                --count;
        }

        if (kbuf)
                for (; count > 0 && pos <= PT_MAX_PUT_REG; --count)
                        regs[pos++] = *k++;
        else
                for (; count > 0 && pos <= PT_MAX_PUT_REG; --count) {
                        if (__get_user(reg, u++))
                                return -EFAULT;
                        regs[pos++] = reg;
                }

        if (count > 0 && pos == PT_TRAP) {
                if (kbuf)
                        reg = *k++;
                else if (__get_user(reg, u++))
                        return -EFAULT;
                set_user_trap(target, reg);
                ++pos;
                --count;
        }

        kbuf = k;
        ubuf = u;
        pos *= sizeof(reg);
        count *= sizeof(reg);
        return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
                                         (PT_TRAP + 1) * sizeof(reg), -1);
}

/*
 * These are the regset flavors matching the CONFIG_PPC32 native set.
 */
static const struct user_regset compat_regsets[] = {
        [REGSET_GPR] = {
                .core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
                .size = sizeof(compat_long_t), .align = sizeof(compat_long_t),
                .get = gpr32_get, .set = gpr32_set
        },
        [REGSET_FPR] = {
                .core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
                .size = sizeof(double), .align = sizeof(double),
                .get = fpr_get, .set = fpr_set
        },
#ifdef CONFIG_ALTIVEC
        [REGSET_VMX] = {
                .core_note_type = NT_PPC_VMX, .n = 34,
                .size = sizeof(vector128), .align = sizeof(vector128),
                .active = vr_active, .get = vr_get, .set = vr_set
        },
#endif
#ifdef CONFIG_SPE
        [REGSET_SPE] = {
                .n = 35,
                .size = sizeof(u32), .align = sizeof(u32),
                .active = evr_active, .get = evr_get, .set = evr_set
        },
#endif
};

static const struct user_regset_view user_ppc_compat_view = {
        .name = "ppc", .e_machine = EM_PPC, .ei_osabi = ELF_OSABI,
        .regsets = compat_regsets, .n = ARRAY_SIZE(compat_regsets)
};
#endif  /* CONFIG_PPC64 */

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_PPC64
        if (test_tsk_thread_flag(task, TIF_32BIT))
                return &user_ppc_compat_view;
#endif
        return &user_ppc_native_view;
}

void user_enable_single_step(struct task_struct *task)
{
        struct pt_regs *regs = task->thread.regs;

        if (regs != NULL) {
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
                task->thread.dbcr0 = DBCR0_IDM | DBCR0_IC;
                regs->msr |= MSR_DE;
#else
                regs->msr |= MSR_SE;
#endif
        }
        set_tsk_thread_flag(task, TIF_SINGLESTEP);
}

void user_disable_single_step(struct task_struct *task)
{
        struct pt_regs *regs = task->thread.regs;

        if (regs != NULL) {
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
                task->thread.dbcr0 = 0;
                regs->msr &= ~MSR_DE;
#else
                regs->msr &= ~MSR_SE;
#endif
        }
        clear_tsk_thread_flag(task, TIF_SINGLESTEP);
}
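
/*
 * Illustrative sketch (user space, not built here): a tracer reaches the
 * enable/disable helpers above indirectly through PTRACE_SINGLESTEP.
 * "child" is assumed to be a stopped tracee; error handling omitted.
 *
 *      ptrace(PTRACE_SINGLESTEP, child, 0, 0); -- sets MSR_SE (or MSR_DE on
 *                                                 40x/BookE) via
 *                                                 user_enable_single_step()
 *      waitpid(child, &status, 0);             -- child stops with SIGTRAP
 *                                                 after one instruction
 */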

static int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
                               unsigned long data)
{
        /* We only support one DABR and no IABRS at the moment */
        if (addr > 0)
                return -EINVAL;

        /* The bottom 3 bits are flags */
        if ((data & ~0x7UL) >= TASK_SIZE)
                return -EIO;

        /* Ensure translation is on */
        if (data && !(data & DABR_TRANSLATION))
                return -EIO;

        task->thread.dabr = data;
        return 0;
}
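
/*
 * Illustrative sketch (tracer side, not built here): PTRACE_SET_DEBUGREG
 * reaches the helper above with addr == 0 and data holding the watchpoint
 * address plus the low three flag bits.  Assuming the DABR_* flag values
 * exported for this architecture, a tracer might build the value as:
 *
 *      unsigned long dabr = (watch_addr & ~7UL)
 *                         | DABR_TRANSLATION    -- required by the check above
 *                         | DABR_DATA_WRITE;    -- hypothetical: watch stores
 *      ptrace(PTRACE_SET_DEBUGREG, child, 0, dabr);
 *
 * Passing data == 0 clears the watchpoint.
 */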

/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
        /* make sure the single step bit is not set. */
        user_disable_single_step(child);
}

/*
 * Here are the old "legacy" powerpc specific getregs/setregs ptrace calls,
 * we mark them as obsolete now, they will be removed in a future version
 */
static long arch_ptrace_old(struct task_struct *child, long request, long addr,
                            long data)
{
        int ret = -EPERM;

        switch (request) {
        case PPC_PTRACE_GETREGS: { /* Get GPRs 0 - 31. */
                int i;
                unsigned long *reg = &((unsigned long *)child->thread.regs)[0];
                unsigned long __user *tmp = (unsigned long __user *)addr;

                CHECK_FULL_REGS(child->thread.regs);

                for (i = 0; i < 32; i++) {
                        ret = put_user(*reg, tmp);
                        if (ret)
                                break;
                        reg++;
                        tmp++;
                }
                break;
        }

        case PPC_PTRACE_SETREGS: { /* Set GPRs 0 - 31. */
                int i;
                unsigned long *reg = &((unsigned long *)child->thread.regs)[0];
                unsigned long __user *tmp = (unsigned long __user *)addr;

                CHECK_FULL_REGS(child->thread.regs);

                for (i = 0; i < 32; i++) {
                        ret = get_user(*reg, tmp);
                        if (ret)
                                break;
                        reg++;
                        tmp++;
                }
                break;
        }

        case PPC_PTRACE_GETFPREGS: { /* Get FPRs 0 - 31. */
                flush_fp_to_thread(child);
                ret = get_fpregs((void __user *)addr, child, 0);
                break;
        }

        case PPC_PTRACE_SETFPREGS: { /* Set FPRs 0 - 31. */
                flush_fp_to_thread(child);
                ret = set_fpregs((void __user *)addr, child, 0);
                break;
        }
        }
        return ret;
}

long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
        int ret = -EPERM;

        switch (request) {
        /* when I and D space are separate, these will need to be fixed. */
        case PTRACE_PEEKTEXT: /* read word at location addr. */
        case PTRACE_PEEKDATA:
                ret = generic_ptrace_peekdata(child, addr, data);
                break;

        /* read the word at location addr in the USER area. */
        case PTRACE_PEEKUSR: {
                unsigned long index, tmp;

                ret = -EIO;
                /* convert to index and check */
#ifdef CONFIG_PPC32
                index = (unsigned long) addr >> 2;
                if ((addr & 3) || (index > PT_FPSCR)
                    || (child->thread.regs == NULL))
#else
                index = (unsigned long) addr >> 3;
                if ((addr & 7) || (index > PT_FPSCR))
#endif
                        break;

                CHECK_FULL_REGS(child->thread.regs);
                if (index < PT_FPR0) {
                        tmp = ptrace_get_reg(child, (int) index);
                } else {
                        flush_fp_to_thread(child);
                        tmp = ((unsigned long *)child->thread.fpr)[index - PT_FPR0];
                }
                ret = put_user(tmp, (unsigned long __user *) data);
                break;
        }

        /* If I and D space are separate, this will have to be fixed. */
        case PTRACE_POKETEXT: /* write the word at location addr. */
        case PTRACE_POKEDATA:
                ret = generic_ptrace_pokedata(child, addr, data);
                break;

        /* write the word at location addr in the USER area */
        case PTRACE_POKEUSR: {
                unsigned long index;

                ret = -EIO;
                /* convert to index and check */
#ifdef CONFIG_PPC32
                index = (unsigned long) addr >> 2;
                if ((addr & 3) || (index > PT_FPSCR)
                    || (child->thread.regs == NULL))
#else
                index = (unsigned long) addr >> 3;
                if ((addr & 7) || (index > PT_FPSCR))
#endif
                        break;

                CHECK_FULL_REGS(child->thread.regs);
                if (index < PT_FPR0) {
                        ret = ptrace_put_reg(child, index, data);
                } else {
                        flush_fp_to_thread(child);
                        ((unsigned long *)child->thread.fpr)[index - PT_FPR0] = data;
                        ret = 0;
                }
                break;
        }

        case PTRACE_GET_DEBUGREG: {
                ret = -EINVAL;
                /* We only support one DABR and no IABRS at the moment */
                if (addr > 0)
                        break;
                ret = put_user(child->thread.dabr,
                               (unsigned long __user *)data);
                break;
        }

        case PTRACE_SET_DEBUGREG:
                ret = ptrace_set_debugreg(child, addr, data);
                break;

#ifdef CONFIG_PPC64
        case PTRACE_GETREGS64:
#endif
        case PTRACE_GETREGS: { /* Get all pt_regs from the child. */
                int ui;
                if (!access_ok(VERIFY_WRITE, (void __user *)data,
                               sizeof(struct pt_regs))) {
                        ret = -EIO;
                        break;
                }
                CHECK_FULL_REGS(child->thread.regs);
                ret = 0;
                for (ui = 0; ui < PT_REGS_COUNT; ui++) {
                        ret |= __put_user(ptrace_get_reg(child, ui),
                                          (unsigned long __user *) data);
                        data += sizeof(long);
                }
                break;
        }

#ifdef CONFIG_PPC64
        case PTRACE_SETREGS64:
#endif
        case PTRACE_SETREGS: { /* Set all gp regs in the child. */
                unsigned long tmp;
                int ui;
                if (!access_ok(VERIFY_READ, (void __user *)data,
                               sizeof(struct pt_regs))) {
                        ret = -EIO;
                        break;
                }
                CHECK_FULL_REGS(child->thread.regs);
                ret = 0;
                for (ui = 0; ui < PT_REGS_COUNT; ui++) {
                        ret = __get_user(tmp, (unsigned long __user *) data);
                        if (ret)
                                break;
                        ptrace_put_reg(child, ui, tmp);
                        data += sizeof(long);
                }
                break;
        }

        case PTRACE_GETFPREGS: { /* Get the child FPU state (FPR0...31 + FPSCR) */
                flush_fp_to_thread(child);
                ret = get_fpregs((void __user *)data, child, 1);
                break;
        }

        case PTRACE_SETFPREGS: { /* Set the child FPU state (FPR0...31 + FPSCR) */
                flush_fp_to_thread(child);
                ret = set_fpregs((void __user *)data, child, 1);
                break;
        }

#ifdef CONFIG_ALTIVEC
        case PTRACE_GETVRREGS:
                /* Get the child altivec register state. */
                flush_altivec_to_thread(child);
                ret = get_vrregs((unsigned long __user *)data, child);
                break;

        case PTRACE_SETVRREGS:
                /* Set the child altivec register state. */
                flush_altivec_to_thread(child);
                ret = set_vrregs(child, (unsigned long __user *)data);
                break;
#endif
#ifdef CONFIG_SPE
        case PTRACE_GETEVRREGS:
                /* Get the child spe register state. */
                flush_spe_to_thread(child);
                ret = get_evrregs((unsigned long __user *)data, child);
                break;

        case PTRACE_SETEVRREGS:
                /* Set the child spe register state. */
                /* this is to clear the MSR_SPE bit to force a reload
                 * of register state from memory */
                flush_spe_to_thread(child);
                ret = set_evrregs(child, (unsigned long __user *)data);
                break;
#endif

        /* Old reverse args ptrace calls */
        case PPC_PTRACE_GETREGS:        /* Get GPRs 0 - 31. */
        case PPC_PTRACE_SETREGS:        /* Set GPRs 0 - 31. */
        case PPC_PTRACE_GETFPREGS:      /* Get FPRs 0 - 31. */
        case PPC_PTRACE_SETFPREGS:      /* Set FPRs 0 - 31. */
                ret = arch_ptrace_old(child, request, addr, data);
                break;

        default:
                ret = ptrace_request(child, request, addr, data);
                break;
        }
        return ret;
}

static void do_syscall_trace(void)
{
        /*
         * the 0x80 provides a way for the tracing parent to distinguish
         * between a syscall stop and SIGTRAP delivery
         */
        ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
                                 ? 0x80 : 0));

        /*
         * this isn't the same as continuing with a signal, but it will do
         * for normal use.  strace only continues with a signal if the
         * stopping signal is not SIGTRAP.  -brl
         */
        if (current->exit_code) {
                send_sig(current->exit_code, current, 1);
                current->exit_code = 0;
        }
}

void do_syscall_trace_enter(struct pt_regs *regs)
{
        secure_computing(regs->gpr[0]);

        if (test_thread_flag(TIF_SYSCALL_TRACE)
            && (current->ptrace & PT_PTRACED))
                do_syscall_trace();

        if (unlikely(current->audit_context)) {
#ifdef CONFIG_PPC64
                if (!test_thread_flag(TIF_32BIT))
                        audit_syscall_entry(AUDIT_ARCH_PPC64,
                                            regs->gpr[0],
                                            regs->gpr[3], regs->gpr[4],
                                            regs->gpr[5], regs->gpr[6]);
                else
#endif
                        audit_syscall_entry(AUDIT_ARCH_PPC,
                                            regs->gpr[0],
                                            regs->gpr[3] & 0xffffffff,
                                            regs->gpr[4] & 0xffffffff,
                                            regs->gpr[5] & 0xffffffff,
                                            regs->gpr[6] & 0xffffffff);
        }
}

void do_syscall_trace_leave(struct pt_regs *regs)
{
        if (unlikely(current->audit_context))
                audit_syscall_exit((regs->ccr & 0x10000000) ?
                                   AUDITSC_FAILURE : AUDITSC_SUCCESS,
                                   regs->result);

        if ((test_thread_flag(TIF_SYSCALL_TRACE)
             || test_thread_flag(TIF_SINGLESTEP))
            && (current->ptrace & PT_PTRACED))
                do_syscall_trace();
}
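
/*
 * Illustrative sketch (tracer side, not built here): with
 * PTRACE_O_TRACESYSGOOD set, the 0x80 bit added by do_syscall_trace() lets
 * a tracer tell syscall stops apart from ordinary SIGTRAPs.  "child" is a
 * stopped tracee; error handling omitted.
 *
 *      ptrace(PTRACE_SETOPTIONS, child, 0, PTRACE_O_TRACESYSGOOD);
 *      ptrace(PTRACE_SYSCALL, child, 0, 0);
 *      waitpid(child, &status, 0);
 *      if (WIFSTOPPED(status) && WSTOPSIG(status) == (SIGTRAP | 0x80))
 *              -- stopped at syscall entry or exit, not a plain SIGTRAP
 */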