ptrace.c 26 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150
  1. /*
  2. * Based on arch/arm/kernel/ptrace.c
  3. *
  4. * By Ross Biro 1/23/92
  5. * edited by Linus Torvalds
  6. * ARM modifications Copyright (C) 2000 Russell King
  7. * Copyright (C) 2012 ARM Ltd.
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License version 2 as
  11. * published by the Free Software Foundation.
  12. *
  13. * This program is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  16. * GNU General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU General Public License
  19. * along with this program. If not, see <http://www.gnu.org/licenses/>.
  20. */
  21. #include <linux/kernel.h>
  22. #include <linux/sched.h>
  23. #include <linux/mm.h>
  24. #include <linux/smp.h>
  25. #include <linux/ptrace.h>
  26. #include <linux/user.h>
  27. #include <linux/security.h>
  28. #include <linux/init.h>
  29. #include <linux/signal.h>
  30. #include <linux/uaccess.h>
  31. #include <linux/perf_event.h>
  32. #include <linux/hw_breakpoint.h>
  33. #include <linux/regset.h>
  34. #include <linux/tracehook.h>
  35. #include <linux/elf.h>
  36. #include <asm/compat.h>
  37. #include <asm/debug-monitors.h>
  38. #include <asm/pgtable.h>
  39. #include <asm/traps.h>
  40. #include <asm/system_misc.h>
  41. /*
  42. * TODO: does not yet catch signals sent when the child dies.
  43. * in exit.c or in signal.c.
  44. */
/*
 * Called by kernel/ptrace.c when detaching..
 */
void ptrace_disable(struct task_struct *child)
{
	/* Nothing to tear down here on arm64. */
}
  51. /*
  52. * Handle hitting a breakpoint.
  53. */
  54. static int ptrace_break(struct pt_regs *regs)
  55. {
  56. siginfo_t info = {
  57. .si_signo = SIGTRAP,
  58. .si_errno = 0,
  59. .si_code = TRAP_BRKPT,
  60. .si_addr = (void __user *)instruction_pointer(regs),
  61. };
  62. force_sig_info(SIGTRAP, &info, current);
  63. return 0;
  64. }
/* Debug-fault hook registered in ptrace_break_init() for BRK events. */
static int arm64_break_trap(unsigned long addr, unsigned int esr,
			    struct pt_regs *regs)
{
	return ptrace_break(regs);
}
  70. #ifdef CONFIG_HAVE_HW_BREAKPOINT
  71. /*
  72. * Handle hitting a HW-breakpoint.
  73. */
  74. static void ptrace_hbptriggered(struct perf_event *bp,
  75. struct perf_sample_data *data,
  76. struct pt_regs *regs)
  77. {
  78. struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
  79. siginfo_t info = {
  80. .si_signo = SIGTRAP,
  81. .si_errno = 0,
  82. .si_code = TRAP_HWBKPT,
  83. .si_addr = (void __user *)(bkpt->trigger),
  84. };
  85. #ifdef CONFIG_COMPAT
  86. int i;
  87. if (!is_compat_task())
  88. goto send_sig;
  89. for (i = 0; i < ARM_MAX_BRP; ++i) {
  90. if (current->thread.debug.hbp_break[i] == bp) {
  91. info.si_errno = (i << 1) + 1;
  92. break;
  93. }
  94. }
  95. for (i = ARM_MAX_BRP; i < ARM_MAX_HBP_SLOTS && !bp; ++i) {
  96. if (current->thread.debug.hbp_watch[i] == bp) {
  97. info.si_errno = -((i << 1) + 1);
  98. break;
  99. }
  100. }
  101. send_sig:
  102. #endif
  103. force_sig_info(SIGTRAP, &info, current);
  104. }
  105. /*
  106. * Unregister breakpoints from this task and reset the pointers in
  107. * the thread_struct.
  108. */
  109. void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
  110. {
  111. int i;
  112. struct thread_struct *t = &tsk->thread;
  113. for (i = 0; i < ARM_MAX_BRP; i++) {
  114. if (t->debug.hbp_break[i]) {
  115. unregister_hw_breakpoint(t->debug.hbp_break[i]);
  116. t->debug.hbp_break[i] = NULL;
  117. }
  118. }
  119. for (i = 0; i < ARM_MAX_WRP; i++) {
  120. if (t->debug.hbp_watch[i]) {
  121. unregister_hw_breakpoint(t->debug.hbp_watch[i]);
  122. t->debug.hbp_watch[i] = NULL;
  123. }
  124. }
  125. }
  126. void ptrace_hw_copy_thread(struct task_struct *tsk)
  127. {
  128. memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
  129. }
  130. static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
  131. struct task_struct *tsk,
  132. unsigned long idx)
  133. {
  134. struct perf_event *bp = ERR_PTR(-EINVAL);
  135. switch (note_type) {
  136. case NT_ARM_HW_BREAK:
  137. if (idx < ARM_MAX_BRP)
  138. bp = tsk->thread.debug.hbp_break[idx];
  139. break;
  140. case NT_ARM_HW_WATCH:
  141. if (idx < ARM_MAX_WRP)
  142. bp = tsk->thread.debug.hbp_watch[idx];
  143. break;
  144. }
  145. return bp;
  146. }
  147. static int ptrace_hbp_set_event(unsigned int note_type,
  148. struct task_struct *tsk,
  149. unsigned long idx,
  150. struct perf_event *bp)
  151. {
  152. int err = -EINVAL;
  153. switch (note_type) {
  154. case NT_ARM_HW_BREAK:
  155. if (idx < ARM_MAX_BRP) {
  156. tsk->thread.debug.hbp_break[idx] = bp;
  157. err = 0;
  158. }
  159. break;
  160. case NT_ARM_HW_WATCH:
  161. if (idx < ARM_MAX_WRP) {
  162. tsk->thread.debug.hbp_watch[idx] = bp;
  163. err = 0;
  164. }
  165. break;
  166. }
  167. return err;
  168. }
  169. static struct perf_event *ptrace_hbp_create(unsigned int note_type,
  170. struct task_struct *tsk,
  171. unsigned long idx)
  172. {
  173. struct perf_event *bp;
  174. struct perf_event_attr attr;
  175. int err, type;
  176. switch (note_type) {
  177. case NT_ARM_HW_BREAK:
  178. type = HW_BREAKPOINT_X;
  179. break;
  180. case NT_ARM_HW_WATCH:
  181. type = HW_BREAKPOINT_RW;
  182. break;
  183. default:
  184. return ERR_PTR(-EINVAL);
  185. }
  186. ptrace_breakpoint_init(&attr);
  187. /*
  188. * Initialise fields to sane defaults
  189. * (i.e. values that will pass validation).
  190. */
  191. attr.bp_addr = 0;
  192. attr.bp_len = HW_BREAKPOINT_LEN_4;
  193. attr.bp_type = type;
  194. attr.disabled = 1;
  195. bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
  196. if (IS_ERR(bp))
  197. return bp;
  198. err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
  199. if (err)
  200. return ERR_PTR(err);
  201. return bp;
  202. }
  203. static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
  204. struct arch_hw_breakpoint_ctrl ctrl,
  205. struct perf_event_attr *attr)
  206. {
  207. int err, len, type;
  208. err = arch_bp_generic_fields(ctrl, &len, &type);
  209. if (err)
  210. return err;
  211. switch (note_type) {
  212. case NT_ARM_HW_BREAK:
  213. if ((type & HW_BREAKPOINT_X) != type)
  214. return -EINVAL;
  215. break;
  216. case NT_ARM_HW_WATCH:
  217. if ((type & HW_BREAKPOINT_RW) != type)
  218. return -EINVAL;
  219. break;
  220. default:
  221. return -EINVAL;
  222. }
  223. attr->bp_len = len;
  224. attr->bp_type = type;
  225. attr->disabled = !ctrl.enabled;
  226. return 0;
  227. }
  228. static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info)
  229. {
  230. u8 num;
  231. u32 reg = 0;
  232. switch (note_type) {
  233. case NT_ARM_HW_BREAK:
  234. num = hw_breakpoint_slots(TYPE_INST);
  235. break;
  236. case NT_ARM_HW_WATCH:
  237. num = hw_breakpoint_slots(TYPE_DATA);
  238. break;
  239. default:
  240. return -EINVAL;
  241. }
  242. reg |= debug_monitors_arch();
  243. reg <<= 8;
  244. reg |= num;
  245. *info = reg;
  246. return 0;
  247. }
  248. static int ptrace_hbp_get_ctrl(unsigned int note_type,
  249. struct task_struct *tsk,
  250. unsigned long idx,
  251. u32 *ctrl)
  252. {
  253. struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
  254. if (IS_ERR(bp))
  255. return PTR_ERR(bp);
  256. *ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
  257. return 0;
  258. }
  259. static int ptrace_hbp_get_addr(unsigned int note_type,
  260. struct task_struct *tsk,
  261. unsigned long idx,
  262. u64 *addr)
  263. {
  264. struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
  265. if (IS_ERR(bp))
  266. return PTR_ERR(bp);
  267. *addr = bp ? bp->attr.bp_addr : 0;
  268. return 0;
  269. }
/*
 * Return the event backing slot @idx, creating a disabled placeholder
 * event on first use (a NULL slot is valid but not yet populated).
 */
static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
							struct task_struct *tsk,
							unsigned long idx)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (!bp)
		bp = ptrace_hbp_create(note_type, tsk, idx);

	return bp;
}
  279. static int ptrace_hbp_set_ctrl(unsigned int note_type,
  280. struct task_struct *tsk,
  281. unsigned long idx,
  282. u32 uctrl)
  283. {
  284. int err;
  285. struct perf_event *bp;
  286. struct perf_event_attr attr;
  287. struct arch_hw_breakpoint_ctrl ctrl;
  288. bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
  289. if (IS_ERR(bp)) {
  290. err = PTR_ERR(bp);
  291. return err;
  292. }
  293. attr = bp->attr;
  294. decode_ctrl_reg(uctrl, &ctrl);
  295. err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
  296. if (err)
  297. return err;
  298. return modify_user_hw_breakpoint(bp, &attr);
  299. }
  300. static int ptrace_hbp_set_addr(unsigned int note_type,
  301. struct task_struct *tsk,
  302. unsigned long idx,
  303. u64 addr)
  304. {
  305. int err;
  306. struct perf_event *bp;
  307. struct perf_event_attr attr;
  308. bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
  309. if (IS_ERR(bp)) {
  310. err = PTR_ERR(bp);
  311. return err;
  312. }
  313. attr = bp->attr;
  314. attr.bp_addr = addr;
  315. err = modify_user_hw_breakpoint(bp, &attr);
  316. return err;
  317. }
/* Sizes of the fields making up one struct user_hwdebug_state entry. */
#define PTRACE_HBP_ADDR_SZ	sizeof(u64)
#define PTRACE_HBP_CTRL_SZ	sizeof(u32)
#define PTRACE_HBP_PAD_SZ	sizeof(u32)

/*
 * Regset .get handler for NT_ARM_HW_BREAK/NT_ARM_HW_WATCH: emit a
 * struct user_hwdebug_state image - resource-info word, pad, then one
 * (address, ctrl, pad) tuple per debug register slot.
 */
static int hw_break_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 info, ctrl;
	u64 addr;

	/* Resource info */
	ret = ptrace_hbp_get_resource_info(note_type, &info);
	if (ret)
		return ret;

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &info, 0,
				  sizeof(info));
	if (ret)
		return ret;

	/* Pad */
	offset = offsetof(struct user_hwdebug_state, pad);
	ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, offset,
				       offset + PTRACE_HBP_PAD_SZ);
	if (ret)
		return ret;

	/* (address, ctrl) registers */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
		if (ret)
			return ret;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &addr,
					  offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
		if (ret)
			return ret;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &ctrl,
					  offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		/* Zero-fill the per-entry pad word. */
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       offset,
					       offset + PTRACE_HBP_PAD_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}
/*
 * Regset .set handler for NT_ARM_HW_BREAK/NT_ARM_HW_WATCH: consume a
 * struct user_hwdebug_state image, skipping the read-only resource
 * info and pad, and program each supplied (address, ctrl) pair.
 */
static int hw_break_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 ctrl;
	u64 addr;

	/* Resource info and pad */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
	if (ret)
		return ret;

	/* (address, ctrl) registers */
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
					 offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
					 offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		/* Skip over the per-entry pad word. */
		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						offset,
						offset + PTRACE_HBP_PAD_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}
  417. #endif /* CONFIG_HAVE_HW_BREAKPOINT */
/* Copy the task's saved user register frame out to the tracer. */
static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
}
  426. static int gpr_set(struct task_struct *target, const struct user_regset *regset,
  427. unsigned int pos, unsigned int count,
  428. const void *kbuf, const void __user *ubuf)
  429. {
  430. int ret;
  431. struct user_pt_regs newregs;
  432. ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
  433. if (ret)
  434. return ret;
  435. if (!valid_user_regs(&newregs))
  436. return -EINVAL;
  437. task_pt_regs(target)->user_regs = newregs;
  438. return 0;
  439. }
/*
 * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
 */
/* Copy the task's saved FP/SIMD state out to the tracer. */
static int fpr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	uregs = &target->thread.fpsimd_state.user_fpsimd;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
}
  451. static int fpr_set(struct task_struct *target, const struct user_regset *regset,
  452. unsigned int pos, unsigned int count,
  453. const void *kbuf, const void __user *ubuf)
  454. {
  455. int ret;
  456. struct user_fpsimd_state newstate;
  457. ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1);
  458. if (ret)
  459. return ret;
  460. target->thread.fpsimd_state.user_fpsimd = newstate;
  461. return ret;
  462. }
/* Expose the saved TLS value (thread.tp_value) as a one-word regset. */
static int tls_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	unsigned long *tls = &target->thread.tp_value;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, tls, 0, -1);
}
  470. static int tls_set(struct task_struct *target, const struct user_regset *regset,
  471. unsigned int pos, unsigned int count,
  472. const void *kbuf, const void __user *ubuf)
  473. {
  474. int ret;
  475. unsigned long tls;
  476. ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
  477. if (ret)
  478. return ret;
  479. target->thread.tp_value = tls;
  480. return ret;
  481. }
/* Indices into aarch64_regsets[] below. */
enum aarch64_regset {
	REGSET_GPR,
	REGSET_FPR,
	REGSET_TLS,
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	REGSET_HW_BREAK,
	REGSET_HW_WATCH,
#endif
};
/* Native regsets exported through PTRACE_GETREGSET/SETREGSET. */
static const struct user_regset aarch64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_pt_regs) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.get = gpr_get,
		.set = gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_fpsimd_state) / sizeof(u32),
		/*
		 * We pretend we have 32-bit registers because the fpsr and
		 * fpcr are 32-bits wide.
		 */
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = fpr_get,
		.set = fpr_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 1,
		.size = sizeof(void *),
		.align = sizeof(void *),
		.get = tls_get,
		.set = tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
#endif
};
/* Regset view presented to tracers of native (AArch64) tasks. */
static const struct user_regset_view user_aarch64_view = {
	.name = "aarch64", .e_machine = EM_AARCH64,
	.regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets)
};
  543. #ifdef CONFIG_COMPAT
  544. #include <linux/compat.h>
/* Indices into aarch32_regsets[] below. */
enum compat_regset {
	REGSET_COMPAT_GPR,
	REGSET_COMPAT_VFP,
};
  549. static int compat_gpr_get(struct task_struct *target,
  550. const struct user_regset *regset,
  551. unsigned int pos, unsigned int count,
  552. void *kbuf, void __user *ubuf)
  553. {
  554. int ret = 0;
  555. unsigned int i, start, num_regs;
  556. /* Calculate the number of AArch32 registers contained in count */
  557. num_regs = count / regset->size;
  558. /* Convert pos into an register number */
  559. start = pos / regset->size;
  560. if (start + num_regs > regset->n)
  561. return -EIO;
  562. for (i = 0; i < num_regs; ++i) {
  563. unsigned int idx = start + i;
  564. void *reg;
  565. switch (idx) {
  566. case 15:
  567. reg = (void *)&task_pt_regs(target)->pc;
  568. break;
  569. case 16:
  570. reg = (void *)&task_pt_regs(target)->pstate;
  571. break;
  572. case 17:
  573. reg = (void *)&task_pt_regs(target)->orig_x0;
  574. break;
  575. default:
  576. reg = (void *)&task_pt_regs(target)->regs[idx];
  577. }
  578. ret = copy_to_user(ubuf, reg, sizeof(compat_ulong_t));
  579. if (ret)
  580. break;
  581. else
  582. ubuf += sizeof(compat_ulong_t);
  583. }
  584. return ret;
  585. }
  586. static int compat_gpr_set(struct task_struct *target,
  587. const struct user_regset *regset,
  588. unsigned int pos, unsigned int count,
  589. const void *kbuf, const void __user *ubuf)
  590. {
  591. struct pt_regs newregs;
  592. int ret = 0;
  593. unsigned int i, start, num_regs;
  594. /* Calculate the number of AArch32 registers contained in count */
  595. num_regs = count / regset->size;
  596. /* Convert pos into an register number */
  597. start = pos / regset->size;
  598. if (start + num_regs > regset->n)
  599. return -EIO;
  600. newregs = *task_pt_regs(target);
  601. for (i = 0; i < num_regs; ++i) {
  602. unsigned int idx = start + i;
  603. void *reg;
  604. switch (idx) {
  605. case 15:
  606. reg = (void *)&newregs.pc;
  607. break;
  608. case 16:
  609. reg = (void *)&newregs.pstate;
  610. break;
  611. case 17:
  612. reg = (void *)&newregs.orig_x0;
  613. break;
  614. default:
  615. reg = (void *)&newregs.regs[idx];
  616. }
  617. ret = copy_from_user(reg, ubuf, sizeof(compat_ulong_t));
  618. if (ret)
  619. goto out;
  620. else
  621. ubuf += sizeof(compat_ulong_t);
  622. }
  623. if (valid_user_regs(&newregs.user_regs))
  624. *task_pt_regs(target) = newregs;
  625. else
  626. ret = -EINVAL;
  627. out:
  628. return ret;
  629. }
/*
 * Regset .get handler for the AArch32 VFP view: the packed fpsimd
 * state followed by a synthesised fpscr word built from fpsr/fpcr.
 */
static int compat_vfp_get(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  void *kbuf, void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;
	int ret;

	uregs = &target->thread.fpsimd_state.user_fpsimd;

	/*
	 * The VFP registers are packed into the fpsimd_state, so they all sit
	 * nicely together for us. We just need to create the fpscr separately.
	 */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
				  VFP_STATE_SIZE - sizeof(compat_ulong_t));

	if (count && !ret) {
		fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) |
			(uregs->fpcr & VFP_FPSCR_CTRL_MASK);
		/*
		 * NOTE(review): the fpscr word goes through put_user(ubuf)
		 * even when the caller supplied kbuf - verify no kernel-side
		 * caller reaches this tail.
		 */
		ret = put_user(fpscr, (compat_ulong_t *)ubuf);
	}

	return ret;
}
/*
 * Regset .set handler for the AArch32 VFP view: consume the packed
 * register image, then split the trailing fpscr back into fpsr/fpcr.
 */
static int compat_vfp_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;
	int ret;

	if (pos + count > VFP_STATE_SIZE)
		return -EIO;

	uregs = &target->thread.fpsimd_state.user_fpsimd;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
				 VFP_STATE_SIZE - sizeof(compat_ulong_t));

	if (count && !ret) {
		/*
		 * NOTE(review): reads through ubuf even when kbuf is
		 * supplied, and fpscr is used without checking the
		 * get_user() result - verify against regset callers.
		 */
		ret = get_user(fpscr, (compat_ulong_t *)ubuf);
		uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK;
		uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
	}

	return ret;
}
/* Compat (AArch32) regsets exported to 32-bit tracers. */
static const struct user_regset aarch32_regsets[] = {
	[REGSET_COMPAT_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_COMPAT_VFP] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.get = compat_vfp_get,
		.set = compat_vfp_set
	},
};
/* Regset view presented to tracers of compat (AArch32) tasks. */
static const struct user_regset_view user_aarch32_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
};
  694. int aarch32_break_trap(struct pt_regs *regs)
  695. {
  696. unsigned int instr;
  697. bool bp = false;
  698. void __user *pc = (void __user *)instruction_pointer(regs);
  699. if (compat_thumb_mode(regs)) {
  700. /* get 16-bit Thumb instruction */
  701. get_user(instr, (u16 __user *)pc);
  702. if (instr == AARCH32_BREAK_THUMB2_LO) {
  703. /* get second half of 32-bit Thumb-2 instruction */
  704. get_user(instr, (u16 __user *)(pc + 2));
  705. bp = instr == AARCH32_BREAK_THUMB2_HI;
  706. } else {
  707. bp = instr == AARCH32_BREAK_THUMB;
  708. }
  709. } else {
  710. /* 32-bit ARM instruction */
  711. get_user(instr, (u32 __user *)pc);
  712. bp = (instr & ~0xf0000000) == AARCH32_BREAK_ARM;
  713. }
  714. if (bp)
  715. return ptrace_break(regs);
  716. return 1;
  717. }
/*
 * PTRACE_PEEKUSR for compat tasks: read one word of the emulated ARM
 * "user area" - magic text/data offsets, the GPR block, or zero for
 * other valid offsets below COMPAT_USER_SZ.
 */
static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
				   compat_ulong_t __user *ret)
{
	compat_ulong_t tmp;

	/* Offsets must be word-aligned. */
	if (off & 3)
		return -EIO;

	/* The ordering of these tests is load-bearing: magic offsets first. */
	if (off == COMPAT_PT_TEXT_ADDR)
		tmp = tsk->mm->start_code;
	else if (off == COMPAT_PT_DATA_ADDR)
		tmp = tsk->mm->start_data;
	else if (off == COMPAT_PT_TEXT_END_ADDR)
		tmp = tsk->mm->end_code;
	else if (off < sizeof(compat_elf_gregset_t))
		return copy_regset_to_user(tsk, &user_aarch32_view,
					   REGSET_COMPAT_GPR, off,
					   sizeof(compat_ulong_t), ret);
	else if (off >= COMPAT_USER_SZ)
		return -EIO;
	else
		tmp = 0;

	return put_user(tmp, ret);
}
/*
 * PTRACE_POKEUSR for compat tasks: only the GPR block is writable;
 * writes elsewhere inside the user area are silently accepted.
 */
static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
				    compat_ulong_t val)
{
	int ret;

	/* Word-aligned and inside the emulated user area. */
	if (off & 3 || off >= COMPAT_USER_SZ)
		return -EIO;

	/* Offsets past the register block are ignored, not errors. */
	if (off >= sizeof(compat_elf_gregset_t))
		return 0;

	ret = copy_regset_from_user(tsk, &user_aarch32_view,
				    REGSET_COMPAT_GPR, off,
				    sizeof(compat_ulong_t),
				    &val);
	return ret;
}
  754. #ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
 * Convert a virtual register number into an index for a thread_info
 * breakpoint array. Breakpoints are identified using positive numbers
 * whilst watchpoints are negative. The registers are laid out as pairs
 * of (address, control), each pair mapping to a unique hw_breakpoint struct.
 * Register 0 is reserved for describing resource information.
 */
static int compat_ptrace_hbp_num_to_idx(compat_long_t num)
{
	/* Callers never pass num == 0 (reserved for resource info). */
	return (abs(num) - 1) >> 1;
}
  766. static int compat_ptrace_hbp_get_resource_info(u32 *kdata)
  767. {
  768. u8 num_brps, num_wrps, debug_arch, wp_len;
  769. u32 reg = 0;
  770. num_brps = hw_breakpoint_slots(TYPE_INST);
  771. num_wrps = hw_breakpoint_slots(TYPE_DATA);
  772. debug_arch = debug_monitors_arch();
  773. wp_len = 8;
  774. reg |= debug_arch;
  775. reg <<= 8;
  776. reg |= wp_len;
  777. reg <<= 8;
  778. reg |= num_wrps;
  779. reg <<= 8;
  780. reg |= num_brps;
  781. *kdata = reg;
  782. return 0;
  783. }
  784. static int compat_ptrace_hbp_get(unsigned int note_type,
  785. struct task_struct *tsk,
  786. compat_long_t num,
  787. u32 *kdata)
  788. {
  789. u64 addr = 0;
  790. u32 ctrl = 0;
  791. int err, idx = compat_ptrace_hbp_num_to_idx(num);;
  792. if (num & 1) {
  793. err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
  794. *kdata = (u32)addr;
  795. } else {
  796. err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl);
  797. *kdata = ctrl;
  798. }
  799. return err;
  800. }
  801. static int compat_ptrace_hbp_set(unsigned int note_type,
  802. struct task_struct *tsk,
  803. compat_long_t num,
  804. u32 *kdata)
  805. {
  806. u64 addr;
  807. u32 ctrl;
  808. int err, idx = compat_ptrace_hbp_num_to_idx(num);
  809. if (num & 1) {
  810. addr = *kdata;
  811. err = ptrace_hbp_set_addr(note_type, tsk, idx, addr);
  812. } else {
  813. ctrl = *kdata;
  814. err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl);
  815. }
  816. return err;
  817. }
/*
 * COMPAT_PTRACE_GETHBPREGS: num < 0 reads a watchpoint register,
 * num == 0 the resource-info word, num > 0 a breakpoint register.
 */
static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata;
	mm_segment_t old_fs = get_fs();

	/*
	 * NOTE(review): KERNEL_DS is presumably needed for the helper
	 * paths below - confirm which callee requires it.
	 */
	set_fs(KERNEL_DS);
	/* Watchpoint */
	if (num < 0) {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
	/* Resource info */
	} else if (num == 0) {
		ret = compat_ptrace_hbp_get_resource_info(&kdata);
	/* Breakpoint */
	} else {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
	}
	set_fs(old_fs);

	if (!ret)
		ret = put_user(kdata, data);

	return ret;
}
/*
 * COMPAT_PTRACE_SETHBPREGS: write one virtual debug register.
 * Register 0 (resource info) is read-only and silently accepted.
 */
static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata = 0;
	mm_segment_t old_fs = get_fs();

	if (num == 0)
		return 0;

	/* Fetch the new value before switching address limits. */
	ret = get_user(kdata, data);
	if (ret)
		return ret;

	/*
	 * NOTE(review): KERNEL_DS is presumably needed for the helper
	 * paths below - confirm which callee requires it.
	 */
	set_fs(KERNEL_DS);
	if (num < 0)
		ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
	else
		ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);
	set_fs(old_fs);

	return ret;
}
  859. #endif /* CONFIG_HAVE_HW_BREAKPOINT */
/*
 * Dispatch ptrace requests issued by a 32-bit tracer, mapping the
 * legacy ARM requests onto the aarch32 regset view where possible
 * and deferring everything else to the generic compat code.
 */
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = compat_ptrace_read_user(child, addr, datap);
		break;
	case PTRACE_POKEUSR:
		ret = compat_ptrace_write_user(child, addr, data);
		break;
	case COMPAT_PTRACE_GETREGS:
		ret = copy_regset_to_user(child,
					  &user_aarch32_view,
					  REGSET_COMPAT_GPR,
					  0, sizeof(compat_elf_gregset_t),
					  datap);
		break;
	case COMPAT_PTRACE_SETREGS:
		ret = copy_regset_from_user(child,
					    &user_aarch32_view,
					    REGSET_COMPAT_GPR,
					    0, sizeof(compat_elf_gregset_t),
					    datap);
		break;
	case COMPAT_PTRACE_GET_THREAD_AREA:
		ret = put_user((compat_ulong_t)child->thread.tp_value,
			       (compat_ulong_t __user *)datap);
		break;
	case COMPAT_PTRACE_SET_SYSCALL:
		task_pt_regs(child)->syscallno = data;
		ret = 0;
		break;
	case COMPAT_PTRACE_GETVFPREGS:
		ret = copy_regset_to_user(child,
					  &user_aarch32_view,
					  REGSET_COMPAT_VFP,
					  0, VFP_STATE_SIZE,
					  datap);
		break;
	case COMPAT_PTRACE_SETVFPREGS:
		ret = copy_regset_from_user(child,
					    &user_aarch32_view,
					    REGSET_COMPAT_VFP,
					    0, VFP_STATE_SIZE,
					    datap);
		break;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	case COMPAT_PTRACE_GETHBPREGS:
		ret = compat_ptrace_gethbpregs(child, addr, datap);
		break;
	case COMPAT_PTRACE_SETHBPREGS:
		ret = compat_ptrace_sethbpregs(child, addr, datap);
		break;
#endif
	default:
		/* All remaining requests are handled generically. */
		ret = compat_ptrace_request(child, request, addr,
					    data);
		break;
	}

	return ret;
}
  925. #endif /* CONFIG_COMPAT */
/* Pick the regset view matching the task's native/compat state. */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	if (is_compat_thread(task_thread_info(task)))
		return &user_aarch32_view;
#endif
	return &user_aarch64_view;
}
/* No AArch64-specific ptrace requests: defer to the generic code. */
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	return ptrace_request(child, request, addr, data);
}
/*
 * Install the BRK debug-fault hook (arm64_break_trap) that turns
 * userspace breakpoint instructions into SIGTRAP.
 */
static int __init ptrace_break_init(void)
{
	hook_debug_fault_code(DBG_ESR_EVT_BRK, arm64_break_trap, SIGTRAP,
			      TRAP_BRKPT, "ptrace BRK handler");
	return 0;
}
core_initcall(ptrace_break_init);
/*
 * Report syscall entry (dir == 0) or exit (dir == 1) to the tracer
 * when TIF_SYSCALL_TRACE is set. The scratch register used to flag
 * the direction (ip/r12 for compat, x7 for native) is saved and
 * restored around the tracehook calls. Returns the possibly-updated
 * syscall number.
 */
asmlinkage int syscall_trace(int dir, struct pt_regs *regs)
{
	unsigned long saved_reg;

	if (!test_thread_flag(TIF_SYSCALL_TRACE))
		return regs->syscallno;

	if (is_compat_task()) {
		/* AArch32 uses ip (r12) for scratch */
		saved_reg = regs->regs[12];
		regs->regs[12] = dir;
	} else {
		/*
		 * Save X7. X7 is used to denote syscall entry/exit:
		 * X7 = 0 -> entry, = 1 -> exit
		 */
		saved_reg = regs->regs[7];
		regs->regs[7] = dir;
	}

	if (dir)
		tracehook_report_syscall_exit(regs, 0);
	else if (tracehook_report_syscall_entry(regs))
		/* Tracer vetoed the syscall: mark it as skipped. */
		regs->syscallno = ~0UL;

	if (is_compat_task())
		regs->regs[12] = saved_reg;
	else
		regs->regs[7] = saved_reg;

	return regs->syscallno;
}