/* arch/powerpc/kernel/ptrace.c */
  1. /*
  2. * PowerPC version
  3. * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
  4. *
  5. * Derived from "arch/m68k/kernel/ptrace.c"
  6. * Copyright (C) 1994 by Hamish Macdonald
  7. * Taken from linux/kernel/ptrace.c and modified for M680x0.
  8. * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
  9. *
  10. * Modified by Cort Dougan (cort@hq.fsmlabs.com)
  11. * and Paul Mackerras (paulus@samba.org).
  12. *
  13. * This file is subject to the terms and conditions of the GNU General
  14. * Public License. See the file README.legal in the main directory of
  15. * this archive for more details.
  16. */
  17. #include <linux/kernel.h>
  18. #include <linux/sched.h>
  19. #include <linux/mm.h>
  20. #include <linux/smp.h>
  21. #include <linux/errno.h>
  22. #include <linux/ptrace.h>
  23. #include <linux/regset.h>
  24. #include <linux/elf.h>
  25. #include <linux/user.h>
  26. #include <linux/security.h>
  27. #include <linux/signal.h>
  28. #include <linux/seccomp.h>
  29. #include <linux/audit.h>
  30. #ifdef CONFIG_PPC32
  31. #include <linux/module.h>
  32. #endif
  33. #include <asm/uaccess.h>
  34. #include <asm/page.h>
  35. #include <asm/pgtable.h>
  36. #include <asm/system.h>
/*
 * does not yet catch signals sent when the child dies.
 * in exit.c or in signal.c.
 */

/*
 * Set of msr bits that gdb can change on behalf of a process.
 * On 4xx/Book-E single-step is driven by the DBCR/DBSR debug
 * registers rather than MSR bits, so no MSR bits are writable there.
 */
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
#define MSR_DEBUGCHANGE	0
#else
#define MSR_DEBUGCHANGE	(MSR_SE | MSR_BE)
#endif

/*
 * Max register writeable via put_reg: MQ exists only on 32-bit
 * (601-era) layouts; 64-bit pt_regs ends its writable range at CCR.
 */
#ifdef CONFIG_PPC32
#define PT_MAX_PUT_REG	PT_MQ
#else
#define PT_MAX_PUT_REG	PT_CCR
#endif
/*
 * Return the MSR value to show to userspace: the real MSR with the
 * task's FP exception mode bits ORed in (they live in fpexc_mode,
 * not in the saved regs).
 */
static unsigned long get_user_msr(struct task_struct *task)
{
	return task->thread.regs->msr | task->thread.fpexc_mode;
}
/*
 * Update the task's MSR from a value supplied by the tracer.
 * Only the MSR_DEBUGCHANGE bits may be modified; everything else
 * is preserved so a debugger cannot corrupt privileged state.
 */
static int set_user_msr(struct task_struct *task, unsigned long msr)
{
	task->thread.regs->msr &= ~MSR_DEBUGCHANGE;
	task->thread.regs->msr |= msr & MSR_DEBUGCHANGE;
	return 0;
}
/*
 * We prevent mucking around with the reserved area of trap
 * which are used internally by the kernel.
 */
static int set_user_trap(struct task_struct *task, unsigned long trap)
{
	/* Low 4 bits of trap are kernel-internal flags; mask them off. */
	task->thread.regs->trap = trap & 0xfff0;
	return 0;
}
/*
 * Get contents of register REGNO in task TASK.
 *
 * pt_regs is treated as an array of longs indexed by REGNO; PT_MSR is
 * special-cased so the fpexc_mode bits are folded in.  Returns -EIO
 * (cast to unsigned long -- callers must know REGNO is valid or check
 * for the error encoding) when regs are absent or REGNO out of range.
 */
unsigned long ptrace_get_reg(struct task_struct *task, int regno)
{
	/* Kernel threads have no user register frame. */
	if (task->thread.regs == NULL)
		return -EIO;

	if (regno == PT_MSR)
		return get_user_msr(task);

	if (regno < (sizeof(struct pt_regs) / sizeof(unsigned long)))
		return ((unsigned long *)task->thread.regs)[regno];

	return -EIO;
}
/*
 * Write contents of register REGNO in task TASK.
 *
 * MSR and TRAP are filtered through their setters (which restrict the
 * writable bits); other registers are writable only up to
 * PT_MAX_PUT_REG.  Returns 0 on success, -EIO otherwise.
 */
int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data)
{
	if (task->thread.regs == NULL)
		return -EIO;

	if (regno == PT_MSR)
		return set_user_msr(task, data);
	if (regno == PT_TRAP)
		return set_user_trap(task, data);

	if (regno <= PT_MAX_PUT_REG) {
		((unsigned long *)task->thread.regs)[regno] = data;
		return 0;
	}
	return -EIO;
}
/*
 * Regset get for the GPR set: copy out pt_regs, substituting the
 * synthesized user-visible MSR, and zero-fill anything the regset
 * advertises beyond pt_regs.
 */
static int gpr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	int ret;

	if (target->thread.regs == NULL)
		return -EIO;

	CHECK_FULL_REGS(target->thread.regs);

	/* Everything before MSR comes straight from pt_regs. */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  target->thread.regs,
				  0, offsetof(struct pt_regs, msr));
	if (!ret) {
		/* MSR is synthesized (fpexc_mode bits folded in). */
		unsigned long msr = get_user_msr(target);
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
					  offsetof(struct pt_regs, msr),
					  offsetof(struct pt_regs, msr) +
					  sizeof(msr));
	}

	/* The copy below assumes orig_gpr3 immediately follows msr. */
	BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
		     offsetof(struct pt_regs, msr) + sizeof(long));

	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &target->thread.regs->orig_gpr3,
					  offsetof(struct pt_regs, orig_gpr3),
					  sizeof(struct pt_regs));
	if (!ret)
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       sizeof(struct pt_regs), -1);
	return ret;
}
/*
 * Regset set for the GPR set: the mirror of gpr_get.  MSR and TRAP
 * are filtered through their setters; registers between
 * PT_MAX_PUT_REG and PT_TRAP, and everything past TRAP, are
 * read-only and the tracer-supplied values are silently discarded.
 */
static int gpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	unsigned long reg;
	int ret;

	if (target->thread.regs == NULL)
		return -EIO;

	CHECK_FULL_REGS(target->thread.regs);

	/* GPRs 0..31 (everything before MSR) go straight into pt_regs. */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 target->thread.regs,
				 0, PT_MSR * sizeof(reg));

	/* MSR: only MSR_DEBUGCHANGE bits may actually change. */
	if (!ret && count > 0) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
					 PT_MSR * sizeof(reg),
					 (PT_MSR + 1) * sizeof(reg));
		if (!ret)
			ret = set_user_msr(target, reg);
	}

	/* The block copy below assumes orig_gpr3 directly follows msr. */
	BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
		     offsetof(struct pt_regs, msr) + sizeof(long));

	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.regs->orig_gpr3,
					 PT_ORIG_R3 * sizeof(reg),
					 (PT_MAX_PUT_REG + 1) * sizeof(reg));

	/* Skip (ignore) read-only registers up to TRAP, if any. */
	if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
		ret = user_regset_copyin_ignore(
			&pos, &count, &kbuf, &ubuf,
			(PT_MAX_PUT_REG + 1) * sizeof(reg),
			PT_TRAP * sizeof(reg));

	/* TRAP: low bits are kernel-reserved, set_user_trap masks them. */
	if (!ret && count > 0) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
					 PT_TRAP * sizeof(reg),
					 (PT_TRAP + 1) * sizeof(reg));
		if (!ret)
			ret = set_user_trap(target, reg);
	}

	/* Everything past TRAP is read-only too. */
	if (!ret)
		ret = user_regset_copyin_ignore(
			&pos, &count, &kbuf, &ubuf,
			(PT_TRAP + 1) * sizeof(reg), -1);

	return ret;
}
/*
 * Regset get for the FP set (fpr0..31 + fpscr).
 *
 * With VSX, each FP register is the top half of a 128-bit VSR, so the
 * values must be gathered into a packed local buffer first; without
 * VSX, thread.fpr is already laid out contiguously and can be copied
 * directly.
 */
static int fpr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
#ifdef CONFIG_VSX
	double buf[33];		/* fpr0..31 + fpscr */
	int i;
#endif
	flush_fp_to_thread(target);

#ifdef CONFIG_VSX
	/* copy to local buffer then write that out */
	for (i = 0; i < 32 ; i++)
		buf[i] = target->thread.TS_FPR(i);
	memcpy(&buf[32], &target->thread.fpscr, sizeof(double));
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);

#else
	/* fpscr must sit right after fpr[31] for the direct copy. */
	BUILD_BUG_ON(offsetof(struct thread_struct, fpscr) !=
		     offsetof(struct thread_struct, TS_FPR(32)));

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &target->thread.fpr, 0, -1);
#endif
}
/*
 * Regset set for the FP set: mirror of fpr_get.  With VSX the values
 * are read into a packed local buffer and then scattered into the VSR
 * halves; without VSX they are written straight into thread.fpr.
 */
static int fpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
#ifdef CONFIG_VSX
	double buf[33];		/* fpr0..31 + fpscr */
	int i;
#endif
	flush_fp_to_thread(target);

#ifdef CONFIG_VSX
	/* copy to local buffer then write that out */
	i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
	if (i)
		return i;
	for (i = 0; i < 32 ; i++)
		target->thread.TS_FPR(i) = buf[i];
	memcpy(&target->thread.fpscr, &buf[32], sizeof(double));
	return 0;

#else
	/* fpscr must sit right after fpr[31] for the direct copy. */
	BUILD_BUG_ON(offsetof(struct thread_struct, fpscr) !=
		     offsetof(struct thread_struct, TS_FPR(32)));

	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.fpr, 0, -1);
#endif
}
#ifdef CONFIG_ALTIVEC
/*
 * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
 * The transfer totals 34 quadword.  Quadwords 0-31 contain the
 * corresponding vector registers.  Quadword 32 contains the vscr as the
 * last word (offset 12) within that quadword.  Quadword 33 contains the
 * vrsave as the first word (offset 0) within the quadword.
 *
 * This definition of the VMX state is compatible with the current PPC32
 * ptrace interface.  This allows signal handling and ptrace to use the
 * same structures.  This also simplifies the implementation of a bi-arch
 * (combined (32- and 64-bit) gdb.
 */

/*
 * Report the regset as active only if the task has actually used
 * AltiVec; otherwise it is omitted (e.g. from core dumps).
 */
static int vr_active(struct task_struct *target,
		     const struct user_regset *regset)
{
	flush_altivec_to_thread(target);
	return target->thread.used_vr ? regset->n : 0;
}
/*
 * Regset get for the VMX set: vr0..31 and vscr are copied directly
 * from the thread (they are contiguous); vrsave is a 32-bit SPR
 * presented as the first word of a zero-padded quadword.
 */
static int vr_get(struct task_struct *target, const struct user_regset *regset,
		  unsigned int pos, unsigned int count,
		  void *kbuf, void __user *ubuf)
{
	int ret;

	flush_altivec_to_thread(target);

	/* vscr must directly follow vr[31] for the direct copy. */
	BUILD_BUG_ON(offsetof(struct thread_struct, vscr) !=
		     offsetof(struct thread_struct, vr[32]));

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &target->thread.vr, 0,
				  33 * sizeof(vector128));
	if (!ret) {
		/*
		 * Copy out only the low-order word of vrsave.
		 */
		union {
			elf_vrreg_t reg;
			u32 word;
		} vrsave;
		memset(&vrsave, 0, sizeof(vrsave));
		vrsave.word = target->thread.vrsave;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
					  33 * sizeof(vector128), -1);
	}

	return ret;
}
/*
 * Regset set for the VMX set: mirror of vr_get.  Only the first word
 * of the vrsave quadword is applied; the rest of that quadword is
 * ignored.
 */
static int vr_set(struct task_struct *target, const struct user_regset *regset,
		  unsigned int pos, unsigned int count,
		  const void *kbuf, const void __user *ubuf)
{
	int ret;

	flush_altivec_to_thread(target);

	/* vscr must directly follow vr[31] for the direct copy. */
	BUILD_BUG_ON(offsetof(struct thread_struct, vscr) !=
		     offsetof(struct thread_struct, vr[32]));

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.vr, 0, 33 * sizeof(vector128));
	if (!ret && count > 0) {
		/*
		 * We use only the first word of vrsave.
		 */
		union {
			elf_vrreg_t reg;
			u32 word;
		} vrsave;
		memset(&vrsave, 0, sizeof(vrsave));
		vrsave.word = target->thread.vrsave;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
					 33 * sizeof(vector128), -1);
		if (!ret)
			target->thread.vrsave = vrsave.word;
	}

	return ret;
}
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
/*
 * Currently to set and and get all the vsx state, you need to call
 * the fp and VMX calls aswell.  This only get/sets the lower 32
 * 128bit VSX registers.
 */

/*
 * Report the VSX regset as active only if the task has used VSX;
 * inactive regsets are skipped (e.g. in core dumps).
 */
static int vsr_active(struct task_struct *target,
		      const struct user_regset *regset)
{
	flush_vsx_to_thread(target);
	return target->thread.used_vsr ? regset->n : 0;
}
  312. static int vsr_get(struct task_struct *target, const struct user_regset *regset,
  313. unsigned int pos, unsigned int count,
  314. void *kbuf, void __user *ubuf)
  315. {
  316. double buf[32];
  317. int ret, i;
  318. flush_vsx_to_thread(target);
  319. for (i = 0; i < 32 ; i++)
  320. buf[i] = current->thread.fpr[i][TS_VSRLOWOFFSET];
  321. ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
  322. buf, 0, 32 * sizeof(double));
  323. return ret;
  324. }
  325. static int vsr_set(struct task_struct *target, const struct user_regset *regset,
  326. unsigned int pos, unsigned int count,
  327. const void *kbuf, const void __user *ubuf)
  328. {
  329. double buf[32];
  330. int ret,i;
  331. flush_vsx_to_thread(target);
  332. ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
  333. buf, 0, 32 * sizeof(double));
  334. for (i = 0; i < 32 ; i++)
  335. current->thread.fpr[i][TS_VSRLOWOFFSET] = buf[i];
  336. return ret;
  337. }
  338. #endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
/*
 * For get_evrregs/set_evrregs functions 'data' has the following layout:
 *
 * struct {
 *   u32 evr[32];
 *   u64 acc;
 *   u32 spefscr;
 * }
 */

/*
 * Report the SPE regset as active only if the task has used SPE.
 */
static int evr_active(struct task_struct *target,
		      const struct user_regset *regset)
{
	flush_spe_to_thread(target);
	return target->thread.used_spe ? regset->n : 0;
}
/*
 * Regset get for the SPE set: evr[] first, then acc and spefscr,
 * which must be adjacent in thread_struct so they can be copied as
 * one block.
 */
static int evr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	int ret;

	flush_spe_to_thread(target);

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &target->thread.evr,
				  0, sizeof(target->thread.evr));

	/* acc and spefscr must be adjacent for the second block copy. */
	BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
		     offsetof(struct thread_struct, spefscr));

	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &target->thread.acc,
					  sizeof(target->thread.evr), -1);

	return ret;
}
/*
 * Regset set for the SPE set: mirror of evr_get.
 */
static int evr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;

	flush_spe_to_thread(target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.evr,
				 0, sizeof(target->thread.evr));

	/* acc and spefscr must be adjacent for the second block copy. */
	BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
		     offsetof(struct thread_struct, spefscr));

	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.acc,
					 sizeof(target->thread.evr), -1);

	return ret;
}
#endif /* CONFIG_SPE */
/*
 * These are our native regset flavors.
 * Indices into native_regsets[] (and compat_regsets[] where the
 * flavor exists there too); optional sets only exist when the
 * corresponding feature is configured in.
 */
enum powerpc_regset {
	REGSET_GPR,
	REGSET_FPR,
#ifdef CONFIG_ALTIVEC
	REGSET_VMX,
#endif
#ifdef CONFIG_VSX
	REGSET_VSX,
#endif
#ifdef CONFIG_SPE
	REGSET_SPE,
#endif
};
  406. static const struct user_regset native_regsets[] = {
  407. [REGSET_GPR] = {
  408. .core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
  409. .size = sizeof(long), .align = sizeof(long),
  410. .get = gpr_get, .set = gpr_set
  411. },
  412. [REGSET_FPR] = {
  413. .core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
  414. .size = sizeof(double), .align = sizeof(double),
  415. .get = fpr_get, .set = fpr_set
  416. },
  417. #ifdef CONFIG_ALTIVEC
  418. [REGSET_VMX] = {
  419. .core_note_type = NT_PPC_VMX, .n = 34,
  420. .size = sizeof(vector128), .align = sizeof(vector128),
  421. .active = vr_active, .get = vr_get, .set = vr_set
  422. },
  423. #endif
  424. #ifdef CONFIG_VSX
  425. [REGSET_VSX] = {
  426. .core_note_type = NT_PPC_VSX, .n = 32,
  427. .size = sizeof(double), .align = sizeof(double),
  428. .active = vsr_active, .get = vsr_get, .set = vsr_set
  429. },
  430. #endif
  431. #ifdef CONFIG_SPE
  432. [REGSET_SPE] = {
  433. .n = 35,
  434. .size = sizeof(u32), .align = sizeof(u32),
  435. .active = evr_active, .get = evr_get, .set = evr_set
  436. },
  437. #endif
  438. };
/* Native (same-word-size) regset view returned by task_user_regset_view(). */
static const struct user_regset_view user_ppc_native_view = {
	.name = UTS_MACHINE, .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
	.regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
};
#ifdef CONFIG_PPC64
#include <linux/compat.h>

/*
 * 32-bit compat GPR regset get: narrow each 64-bit pt_regs slot to a
 * compat_ulong_t.  pos/count arrive in bytes, are converted to
 * register indices for the hand-rolled copy loops, then converted
 * back for the final zero-fill via the regset helpers.
 */
static int gpr32_get(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     void *kbuf, void __user *ubuf)
{
	/*
	 * NOTE(review): this takes &regs->gpr[0] before the NULL check
	 * below -- pointer arithmetic on a possibly-NULL pointer.  No
	 * load happens before the check, but it is technically UB;
	 * consider moving the initialization after the check.
	 */
	const unsigned long *regs = &target->thread.regs->gpr[0];
	compat_ulong_t *k = kbuf;
	compat_ulong_t __user *u = ubuf;
	compat_ulong_t reg;

	if (target->thread.regs == NULL)
		return -EIO;

	CHECK_FULL_REGS(target->thread.regs);

	pos /= sizeof(reg);
	count /= sizeof(reg);

	/* Registers before MSR, truncated to 32 bits. */
	if (kbuf)
		for (; count > 0 && pos < PT_MSR; --count)
			*k++ = regs[pos++];
	else
		for (; count > 0 && pos < PT_MSR; --count)
			if (__put_user((compat_ulong_t) regs[pos++], u++))
				return -EFAULT;

	/* MSR is synthesized via get_user_msr(). */
	if (count > 0 && pos == PT_MSR) {
		reg = get_user_msr(target);
		if (kbuf)
			*k++ = reg;
		else if (__put_user(reg, u++))
			return -EFAULT;
		++pos;
		--count;
	}

	/* The rest of the register image, up to PT_REGS_COUNT. */
	if (kbuf)
		for (; count > 0 && pos < PT_REGS_COUNT; --count)
			*k++ = regs[pos++];
	else
		for (; count > 0 && pos < PT_REGS_COUNT; --count)
			if (__put_user((compat_ulong_t) regs[pos++], u++))
				return -EFAULT;

	/* Convert back to byte offsets and zero-fill the tail. */
	kbuf = k;
	ubuf = u;
	pos *= sizeof(reg);
	count *= sizeof(reg);
	return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					PT_REGS_COUNT * sizeof(reg), -1);
}
/*
 * 32-bit compat GPR regset set: mirror of gpr32_get.  MSR and TRAP go
 * through their filtered setters; slots between PT_MAX_PUT_REG and
 * PT_TRAP, and everything past TRAP, are read-only and the supplied
 * values are consumed but discarded.
 */
static int gpr32_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf)
{
	/*
	 * NOTE(review): &regs->gpr[0] is computed before the NULL
	 * check, as in gpr32_get -- technically UB, though nothing is
	 * dereferenced before the check.
	 */
	unsigned long *regs = &target->thread.regs->gpr[0];
	const compat_ulong_t *k = kbuf;
	const compat_ulong_t __user *u = ubuf;
	compat_ulong_t reg;

	if (target->thread.regs == NULL)
		return -EIO;

	CHECK_FULL_REGS(target->thread.regs);

	pos /= sizeof(reg);
	count /= sizeof(reg);

	/* Registers before MSR are stored directly (zero-extended). */
	if (kbuf)
		for (; count > 0 && pos < PT_MSR; --count)
			regs[pos++] = *k++;
	else
		for (; count > 0 && pos < PT_MSR; --count) {
			if (__get_user(reg, u++))
				return -EFAULT;
			regs[pos++] = reg;
		}

	/* MSR: only MSR_DEBUGCHANGE bits may actually change. */
	if (count > 0 && pos == PT_MSR) {
		if (kbuf)
			reg = *k++;
		else if (__get_user(reg, u++))
			return -EFAULT;
		set_user_msr(target, reg);
		++pos;
		--count;
	}

	if (kbuf) {
		for (; count > 0 && pos <= PT_MAX_PUT_REG; --count)
			regs[pos++] = *k++;
		/* Skip read-only slots up to TRAP. */
		for (; count > 0 && pos < PT_TRAP; --count, ++pos)
			++k;
	} else {
		for (; count > 0 && pos <= PT_MAX_PUT_REG; --count) {
			if (__get_user(reg, u++))
				return -EFAULT;
			regs[pos++] = reg;
		}
		/* Skip read-only slots, still validating user access. */
		for (; count > 0 && pos < PT_TRAP; --count, ++pos)
			if (__get_user(reg, u++))
				return -EFAULT;
	}

	/* TRAP goes through set_user_trap() (low bits reserved). */
	if (count > 0 && pos == PT_TRAP) {
		if (kbuf)
			reg = *k++;
		else if (__get_user(reg, u++))
			return -EFAULT;
		set_user_trap(target, reg);
		++pos;
		--count;
	}

	/* Convert back to byte offsets and ignore any remaining data. */
	kbuf = k;
	ubuf = u;
	pos *= sizeof(reg);
	count *= sizeof(reg);
	return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					 (PT_TRAP + 1) * sizeof(reg), -1);
}
/*
 * These are the regset flavors matching the CONFIG_PPC32 native set.
 * GPRs use the 32-bit-narrowing accessors; FP/VMX/SPE layouts are the
 * same in both ABIs so the native accessors are reused.  (No VSX
 * entry: VSX state is exposed only through the native view.)
 */
static const struct user_regset compat_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
		.size = sizeof(compat_long_t), .align = sizeof(compat_long_t),
		.get = gpr32_get, .set = gpr32_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
		.size = sizeof(double), .align = sizeof(double),
		.get = fpr_get, .set = fpr_set
	},
#ifdef CONFIG_ALTIVEC
	[REGSET_VMX] = {
		.core_note_type = NT_PPC_VMX, .n = 34,
		.size = sizeof(vector128), .align = sizeof(vector128),
		.active = vr_active, .get = vr_get, .set = vr_set
	},
#endif
#ifdef CONFIG_SPE
	[REGSET_SPE] = {
		.core_note_type = NT_PPC_SPE, .n = 35,
		.size = sizeof(u32), .align = sizeof(u32),
		.active = evr_active, .get = evr_get, .set = evr_set
	},
#endif
};
/* Regset view handed to 32-bit tasks running on a 64-bit kernel. */
static const struct user_regset_view user_ppc_compat_view = {
	.name = "ppc", .e_machine = EM_PPC, .ei_osabi = ELF_OSABI,
	.regsets = compat_regsets, .n = ARRAY_SIZE(compat_regsets)
};
#endif	/* CONFIG_PPC64 */
/*
 * Select the regset view for TASK: the compat (32-bit) view for
 * 32-bit tasks on a 64-bit kernel, the native view otherwise.
 */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_PPC64
	if (test_tsk_thread_flag(task, TIF_32BIT))
		return &user_ppc_compat_view;
#endif
	return &user_ppc_native_view;
}
/*
 * Arrange for TASK to trap after executing one instruction.
 * 4xx/Book-E use the DBCR0 instruction-completion event plus MSR_DE;
 * classic parts set MSR_SE.  The thread flag is set even for kernel
 * threads (regs == NULL) so the generic ptrace code stays consistent.
 */
void user_enable_single_step(struct task_struct *task)
{
	struct pt_regs *regs = task->thread.regs;

	if (regs != NULL) {
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
		task->thread.dbcr0 |= DBCR0_IDM | DBCR0_IC;
		regs->msr |= MSR_DE;
#else
		regs->msr |= MSR_SE;
#endif
	}
	set_tsk_thread_flag(task, TIF_SINGLESTEP);
}
/*
 * Undo user_enable_single_step().  On Book-E, a pending data-access
 * breakpoint (DAC, stored in thread.dabr) shares DBCR0/MSR_DE, so
 * clearing them here would kill the watchpoint -- bail out instead.
 */
void user_disable_single_step(struct task_struct *task)
{
	struct pt_regs *regs = task->thread.regs;

#if defined(CONFIG_BOOKE)
	/* If DAC then do not single step, skip */
	if (task->thread.dabr)
		return;
#endif

	if (regs != NULL) {
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
		task->thread.dbcr0 &= ~(DBCR0_IC | DBCR0_IDM);
		regs->msr &= ~MSR_DE;
#else
		regs->msr &= ~MSR_SE;
#endif
	}
	clear_tsk_thread_flag(task, TIF_SINGLESTEP);
}
/*
 * Install a hardware data breakpoint for TASK.
 *
 * @addr: debug register index; only register 0 is supported.
 * @data: breakpoint address with mode flags in the low bits (see
 *        below); 0 clears the breakpoint.
 *
 * Returns 0 on success, -EINVAL/-EIO on bad index, address, or flags.
 */
int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
			unsigned long data)
{
	/* For ppc64 we support one DABR and no IABR's at the moment (ppc64).
	 * For embedded processors we support one DAC and no IAC's at the
	 * moment.
	 */
	if (addr > 0)
		return -EINVAL;

	/* The bottom 3 bits in dabr are flags */
	if ((data & ~0x7UL) >= TASK_SIZE)
		return -EIO;

#ifndef CONFIG_BOOKE

	/* For processors using DABR (i.e. 970), the bottom 3 bits are flags.
	 *  It was assumed, on previous implementations, that 3 bits were
	 *  passed together with the data address, fitting the design of the
	 *  DABR register, as follows:
	 *
	 *  bit 0: Read flag
	 *  bit 1: Write flag
	 *  bit 2: Breakpoint translation
	 *
	 *  Thus, we use them here as so.
	 */

	/* Ensure breakpoint translation bit is set */
	if (data && !(data & DABR_TRANSLATION))
		return -EIO;

	/* Move contents to the DABR register */
	task->thread.dabr = data;

#endif
#if defined(CONFIG_BOOKE)

	/* As described above, it was assumed 3 bits were passed with the data
	 *  address, but we will assume only the mode bits will be passed
	 *  as to not cause alignment restrictions for DAC-based processors.
	 */

	/* DAC's hold the whole address without any mode flags */
	task->thread.dabr = data & ~0x3UL;

	if (task->thread.dabr == 0) {
		/* Clearing the breakpoint: drop DAC events, internal
		 * debug mode, and the debug-enable MSR bit. */
		task->thread.dbcr0 &= ~(DBSR_DAC1R | DBSR_DAC1W | DBCR0_IDM);
		task->thread.regs->msr &= ~MSR_DE;
		return 0;
	}

	/* Read or Write bits must be set */

	if (!(data & 0x3UL))
		return -EINVAL;

	/* Set the Internal Debugging flag (IDM bit 1) for the DBCR0
	   register */
	task->thread.dbcr0 = DBCR0_IDM;

	/* Check for write and read flags and set DBCR0
	   accordingly */
	if (data & 0x1UL)
		task->thread.dbcr0 |= DBSR_DAC1R;
	if (data & 0x2UL)
		task->thread.dbcr0 |= DBSR_DAC1W;

	task->thread.regs->msr |= MSR_DE;
#endif
	return 0;
}
/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
	/* make sure the single step bit is not set. */
	user_disable_single_step(child);
}
/*
 * Here are the old "legacy" powerpc specific getregs/setregs ptrace calls,
 * we mark them as obsolete now, they will be removed in a future version
 *
 * They take (addr, data) in the reverse order of the standard requests
 * and transfer only GPRs 0-31 / FPRs 0-31 via the regset copy helpers.
 */
static long arch_ptrace_old(struct task_struct *child, long request, long addr,
			    long data)
{
	switch (request) {
	case PPC_PTRACE_GETREGS:	/* Get GPRs 0 - 31. */
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_GPR, 0, 32 * sizeof(long),
					   (void __user *) data);

	case PPC_PTRACE_SETREGS:	/* Set GPRs 0 - 31. */
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_GPR, 0, 32 * sizeof(long),
					     (const void __user *) data);

	case PPC_PTRACE_GETFPREGS:	/* Get FPRs 0 - 31. */
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_FPR, 0, 32 * sizeof(double),
					   (void __user *) data);

	case PPC_PTRACE_SETFPREGS:	/* Set FPRs 0 - 31. */
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_FPR, 0, 32 * sizeof(double),
					     (const void __user *) data);
	}

	return -EPERM;
}
/*
 * PowerPC-specific ptrace request dispatcher.  Handles the USER-area
 * peek/poke, debug register, and bulk register transfer requests;
 * everything else falls through to the generic ptrace_request().
 */
long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
	int ret = -EPERM;

	switch (request) {
	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long index, tmp;

		ret = -EIO;
		/* convert to index and check */
#ifdef CONFIG_PPC32
		index = (unsigned long) addr >> 2;
		if ((addr & 3) || (index > PT_FPSCR)
		    || (child->thread.regs == NULL))
#else
		index = (unsigned long) addr >> 3;
		if ((addr & 7) || (index > PT_FPSCR))
#endif
			break;

		CHECK_FULL_REGS(child->thread.regs);
		if (index < PT_FPR0) {
			tmp = ptrace_get_reg(child, (int) index);
		} else {
			/* FPR slots: with VSX each FPR occupies
			 * TS_FPRWIDTH doublewords in thread.fpr. */
			flush_fp_to_thread(child);
			tmp = ((unsigned long *)child->thread.fpr)
				[TS_FPRWIDTH * (index - PT_FPR0)];
		}
		ret = put_user(tmp,(unsigned long __user *) data);
		break;
	}

	/* write the word at location addr in the USER area */
	case PTRACE_POKEUSR: {
		unsigned long index;

		ret = -EIO;
		/* convert to index and check */
#ifdef CONFIG_PPC32
		index = (unsigned long) addr >> 2;
		if ((addr & 3) || (index > PT_FPSCR)
		    || (child->thread.regs == NULL))
#else
		index = (unsigned long) addr >> 3;
		if ((addr & 7) || (index > PT_FPSCR))
#endif
			break;

		CHECK_FULL_REGS(child->thread.regs);
		if (index < PT_FPR0) {
			ret = ptrace_put_reg(child, index, data);
		} else {
			flush_fp_to_thread(child);
			((unsigned long *)child->thread.fpr)
				[TS_FPRWIDTH * (index - PT_FPR0)] = data;
			ret = 0;
		}
		break;
	}

	case PTRACE_GET_DEBUGREG: {
		ret = -EINVAL;
		/* We only support one DABR and no IABRS at the moment */
		if (addr > 0)
			break;
		ret = put_user(child->thread.dabr,
			       (unsigned long __user *)data);
		break;
	}

	case PTRACE_SET_DEBUGREG:
		ret = ptrace_set_debugreg(child, addr, data);
		break;

#ifdef CONFIG_PPC64
	case PTRACE_GETREGS64:
#endif
	case PTRACE_GETREGS:	/* Get all pt_regs from the child. */
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_GPR,
					   0, sizeof(struct pt_regs),
					   (void __user *) data);

#ifdef CONFIG_PPC64
	case PTRACE_SETREGS64:
#endif
	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_GPR,
					     0, sizeof(struct pt_regs),
					     (const void __user *) data);

	case PTRACE_GETFPREGS: /* Get the child FPU state (FPR0...31 + FPSCR) */
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_FPR,
					   0, sizeof(elf_fpregset_t),
					   (void __user *) data);

	case PTRACE_SETFPREGS: /* Set the child FPU state (FPR0...31 + FPSCR) */
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_FPR,
					     0, sizeof(elf_fpregset_t),
					     (const void __user *) data);

#ifdef CONFIG_ALTIVEC
	case PTRACE_GETVRREGS:
		/* 33 quadwords (vr0-31 + vscr) + low word of vrsave. */
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_VMX,
					   0, (33 * sizeof(vector128) +
					       sizeof(u32)),
					   (void __user *) data);

	case PTRACE_SETVRREGS:
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_VMX,
					     0, (33 * sizeof(vector128) +
						 sizeof(u32)),
					     (const void __user *) data);
#endif
#ifdef CONFIG_VSX
	case PTRACE_GETVSRREGS:
		/* NOTE(review): the VSX regset provides 32 doublewords
		 * (32 * sizeof(double) = 256 bytes), but this request
		 * asks for 32 * sizeof(vector128) + sizeof(u32) = 516
		 * bytes.  Looks like a copy/paste from the VMX case --
		 * confirm against the regset definition. */
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_VSX,
					   0, (32 * sizeof(vector128) +
					       sizeof(u32)),
					   (void __user *) data);

	case PTRACE_SETVSRREGS:
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_VSX,
					     0, (32 * sizeof(vector128) +
						 sizeof(u32)),
					     (const void __user *) data);
#endif
#ifdef CONFIG_SPE
	case PTRACE_GETEVRREGS:
		/* Get the child spe register state. */
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_SPE, 0, 35 * sizeof(u32),
					   (void __user *) data);

	case PTRACE_SETEVRREGS:
		/* Set the child spe register state. */
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_SPE, 0, 35 * sizeof(u32),
					     (const void __user *) data);
#endif

	/* Old reverse args ptrace callss */
	case PPC_PTRACE_GETREGS: /* Get GPRs 0 - 31. */
	case PPC_PTRACE_SETREGS: /* Set GPRs 0 - 31. */
	case PPC_PTRACE_GETFPREGS: /* Get FPRs 0 - 31. */
	case PPC_PTRACE_SETFPREGS: /* Get FPRs 0 - 31. */
		ret = arch_ptrace_old(child, request, addr, data);
		break;

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}
	return ret;
}
/*
 * Notify the tracer of a syscall entry/exit stop and deliver any
 * signal the tracer requested via the child's exit_code.
 */
static void do_syscall_trace(void)
{
	/* the 0x80 provides a way for the tracing parent to distinguish
	   between a syscall stop and SIGTRAP delivery */
	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
				 ? 0x80 : 0));

	/*
	 * this isn't the same as continuing with a signal, but it will do
	 * for normal use.  strace only continues with a signal if the
	 * stopping signal is not SIGTRAP.  -brl
	 */
	if (current->exit_code) {
		send_sig(current->exit_code, current, 1);
		current->exit_code = 0;
	}
}
/*
 * Syscall-entry hook: apply seccomp, stop for a tracer if syscall
 * tracing is on, and record the audit entry.  gpr[0] holds the
 * syscall number, gpr[3..6] the first four arguments.
 */
void do_syscall_trace_enter(struct pt_regs *regs)
{
	secure_computing(regs->gpr[0]);

	if (test_thread_flag(TIF_SYSCALL_TRACE)
	    && (current->ptrace & PT_PTRACED))
		do_syscall_trace();

	if (unlikely(current->audit_context)) {
#ifdef CONFIG_PPC64
		if (!test_thread_flag(TIF_32BIT))
			audit_syscall_entry(AUDIT_ARCH_PPC64,
					    regs->gpr[0],
					    regs->gpr[3], regs->gpr[4],
					    regs->gpr[5], regs->gpr[6]);
		else
#endif
			/* 32-bit task: audit only the low words. */
			audit_syscall_entry(AUDIT_ARCH_PPC,
					    regs->gpr[0],
					    regs->gpr[3] & 0xffffffff,
					    regs->gpr[4] & 0xffffffff,
					    regs->gpr[5] & 0xffffffff,
					    regs->gpr[6] & 0xffffffff);
	}
}
/*
 * Syscall-exit hook: record the audit exit (CR0.SO in ccr flags a
 * failed syscall) and stop for the tracer on syscall tracing or
 * single-step.
 */
void do_syscall_trace_leave(struct pt_regs *regs)
{
	if (unlikely(current->audit_context))
		audit_syscall_exit((regs->ccr&0x10000000)?AUDITSC_FAILURE:AUDITSC_SUCCESS,
				   regs->result);

	if ((test_thread_flag(TIF_SYSCALL_TRACE)
	     || test_thread_flag(TIF_SINGLESTEP))
	    && (current->ptrace & PT_PTRACED))
		do_syscall_trace();
}