ptrace.c 26 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155
  1. /*
  2. * Based on arch/arm/kernel/ptrace.c
  3. *
  4. * By Ross Biro 1/23/92
  5. * edited by Linus Torvalds
  6. * ARM modifications Copyright (C) 2000 Russell King
  7. * Copyright (C) 2012 ARM Ltd.
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License version 2 as
  11. * published by the Free Software Foundation.
  12. *
  13. * This program is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  16. * GNU General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU General Public License
  19. * along with this program. If not, see <http://www.gnu.org/licenses/>.
  20. */
  21. #include <linux/kernel.h>
  22. #include <linux/sched.h>
  23. #include <linux/mm.h>
  24. #include <linux/smp.h>
  25. #include <linux/ptrace.h>
  26. #include <linux/user.h>
  27. #include <linux/security.h>
  28. #include <linux/init.h>
  29. #include <linux/signal.h>
  30. #include <linux/uaccess.h>
  31. #include <linux/perf_event.h>
  32. #include <linux/hw_breakpoint.h>
  33. #include <linux/regset.h>
  34. #include <linux/tracehook.h>
  35. #include <linux/elf.h>
  36. #include <asm/compat.h>
  37. #include <asm/debug-monitors.h>
  38. #include <asm/pgtable.h>
  39. #include <asm/traps.h>
  40. #include <asm/system_misc.h>
  41. /*
  42. * TODO: does not yet catch signals sent when the child dies.
  43. * in exit.c or in signal.c.
  44. */
  45. /*
  46. * Called by kernel/ptrace.c when detaching..
  47. */
/*
 * Called by kernel/ptrace.c when detaching: nothing arch-specific to undo
 * here (any hardware breakpoints are torn down separately via
 * flush_ptrace_hw_breakpoint()).
 */
void ptrace_disable(struct task_struct *child)
{
}
  51. /*
  52. * Handle hitting a breakpoint.
  53. */
  54. static int ptrace_break(struct pt_regs *regs)
  55. {
  56. siginfo_t info = {
  57. .si_signo = SIGTRAP,
  58. .si_errno = 0,
  59. .si_code = TRAP_BRKPT,
  60. .si_addr = (void __user *)instruction_pointer(regs),
  61. };
  62. force_sig_info(SIGTRAP, &info, current);
  63. return 0;
  64. }
/*
 * Debug-fault hook for the BRK instruction (registered in
 * ptrace_break_init() below); addr/esr are unused, just deliver the trap.
 */
static int arm64_break_trap(unsigned long addr, unsigned int esr,
		struct pt_regs *regs)
{
	return ptrace_break(regs);
}
  70. #ifdef CONFIG_HAVE_HW_BREAKPOINT
  71. /*
  72. * Handle hitting a HW-breakpoint.
  73. */
  74. static void ptrace_hbptriggered(struct perf_event *bp,
  75. struct perf_sample_data *data,
  76. struct pt_regs *regs)
  77. {
  78. struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
  79. siginfo_t info = {
  80. .si_signo = SIGTRAP,
  81. .si_errno = 0,
  82. .si_code = TRAP_HWBKPT,
  83. .si_addr = (void __user *)(bkpt->trigger),
  84. };
  85. #ifdef CONFIG_COMPAT
  86. int i;
  87. if (!is_compat_task())
  88. goto send_sig;
  89. for (i = 0; i < ARM_MAX_BRP; ++i) {
  90. if (current->thread.debug.hbp_break[i] == bp) {
  91. info.si_errno = (i << 1) + 1;
  92. break;
  93. }
  94. }
  95. for (i = ARM_MAX_BRP; i < ARM_MAX_HBP_SLOTS && !bp; ++i) {
  96. if (current->thread.debug.hbp_watch[i] == bp) {
  97. info.si_errno = -((i << 1) + 1);
  98. break;
  99. }
  100. }
  101. send_sig:
  102. #endif
  103. force_sig_info(SIGTRAP, &info, current);
  104. }
  105. /*
  106. * Unregister breakpoints from this task and reset the pointers in
  107. * the thread_struct.
  108. */
  109. void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
  110. {
  111. int i;
  112. struct thread_struct *t = &tsk->thread;
  113. for (i = 0; i < ARM_MAX_BRP; i++) {
  114. if (t->debug.hbp_break[i]) {
  115. unregister_hw_breakpoint(t->debug.hbp_break[i]);
  116. t->debug.hbp_break[i] = NULL;
  117. }
  118. }
  119. for (i = 0; i < ARM_MAX_WRP; i++) {
  120. if (t->debug.hbp_watch[i]) {
  121. unregister_hw_breakpoint(t->debug.hbp_watch[i]);
  122. t->debug.hbp_watch[i] = NULL;
  123. }
  124. }
  125. }
/*
 * Called on thread copy (fork/clone): the child must not inherit the
 * parent's debug state, so wipe the whole debug_info (all hw
 * breakpoint/watchpoint slots).
 */
void ptrace_hw_copy_thread(struct task_struct *tsk)
{
	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
}
  130. static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
  131. struct task_struct *tsk,
  132. unsigned long idx)
  133. {
  134. struct perf_event *bp = ERR_PTR(-EINVAL);
  135. switch (note_type) {
  136. case NT_ARM_HW_BREAK:
  137. if (idx < ARM_MAX_BRP)
  138. bp = tsk->thread.debug.hbp_break[idx];
  139. break;
  140. case NT_ARM_HW_WATCH:
  141. if (idx < ARM_MAX_WRP)
  142. bp = tsk->thread.debug.hbp_watch[idx];
  143. break;
  144. }
  145. return bp;
  146. }
  147. static int ptrace_hbp_set_event(unsigned int note_type,
  148. struct task_struct *tsk,
  149. unsigned long idx,
  150. struct perf_event *bp)
  151. {
  152. int err = -EINVAL;
  153. switch (note_type) {
  154. case NT_ARM_HW_BREAK:
  155. if (idx < ARM_MAX_BRP) {
  156. tsk->thread.debug.hbp_break[idx] = bp;
  157. err = 0;
  158. }
  159. break;
  160. case NT_ARM_HW_WATCH:
  161. if (idx < ARM_MAX_WRP) {
  162. tsk->thread.debug.hbp_watch[idx] = bp;
  163. err = 0;
  164. }
  165. break;
  166. }
  167. return err;
  168. }
/*
 * Lazily create a disabled breakpoint/watchpoint perf_event for slot @idx
 * and record it in the thread's debug state.  The event is registered
 * with placeholder attributes; the debugger fills in address/control via
 * ptrace_hbp_set_addr()/ptrace_hbp_set_ctrl() afterwards.
 *
 * Returns the new event or an ERR_PTR on failure.
 * NOTE(review): if ptrace_hbp_set_event() fails, the freshly registered
 * event is not unregistered here — presumably unreachable since @idx was
 * validated by the caller's lookup, but worth confirming.
 */
static struct perf_event *ptrace_hbp_create(unsigned int note_type,
		struct task_struct *tsk,
		unsigned long idx)
{
	struct perf_event *bp;
	struct perf_event_attr attr;
	int err, type;
	switch (note_type) {
	case NT_ARM_HW_BREAK:
		type = HW_BREAKPOINT_X;
		break;
	case NT_ARM_HW_WATCH:
		type = HW_BREAKPOINT_RW;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}
	ptrace_breakpoint_init(&attr);
	/*
	 * Initialise fields to sane defaults
	 * (i.e. values that will pass validation).
	 */
	attr.bp_addr = 0;
	attr.bp_len = HW_BREAKPOINT_LEN_4;
	attr.bp_type = type;
	attr.disabled = 1;
	bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
	if (IS_ERR(bp))
		return bp;
	err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
	if (err)
		return ERR_PTR(err);
	return bp;
}
/*
 * Translate a decoded hardware control register value into perf_event
 * attributes (bp_len/bp_type/disabled).  A disabled control word maps to
 * an "empty" placeholder; an enabled one must decode to a type compatible
 * with the regset it came from (execute-only for breakpoints, load/store
 * for watchpoints), otherwise -EINVAL.
 */
static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
		struct arch_hw_breakpoint_ctrl ctrl,
		struct perf_event_attr *attr)
{
	int err, len, type, disabled = !ctrl.enabled;
	if (disabled) {
		len = 0;
		type = HW_BREAKPOINT_EMPTY;
	} else {
		err = arch_bp_generic_fields(ctrl, &len, &type);
		if (err)
			return err;
		switch (note_type) {
		case NT_ARM_HW_BREAK:
			/* breakpoints may only carry execute permissions */
			if ((type & HW_BREAKPOINT_X) != type)
				return -EINVAL;
			break;
		case NT_ARM_HW_WATCH:
			/* watchpoints may only carry read/write permissions */
			if ((type & HW_BREAKPOINT_RW) != type)
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}
	}
	attr->bp_len = len;
	attr->bp_type = type;
	attr->disabled = disabled;
	return 0;
}
  233. static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info)
  234. {
  235. u8 num;
  236. u32 reg = 0;
  237. switch (note_type) {
  238. case NT_ARM_HW_BREAK:
  239. num = hw_breakpoint_slots(TYPE_INST);
  240. break;
  241. case NT_ARM_HW_WATCH:
  242. num = hw_breakpoint_slots(TYPE_DATA);
  243. break;
  244. default:
  245. return -EINVAL;
  246. }
  247. reg |= debug_monitors_arch();
  248. reg <<= 8;
  249. reg |= num;
  250. *info = reg;
  251. return 0;
  252. }
  253. static int ptrace_hbp_get_ctrl(unsigned int note_type,
  254. struct task_struct *tsk,
  255. unsigned long idx,
  256. u32 *ctrl)
  257. {
  258. struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
  259. if (IS_ERR(bp))
  260. return PTR_ERR(bp);
  261. *ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
  262. return 0;
  263. }
  264. static int ptrace_hbp_get_addr(unsigned int note_type,
  265. struct task_struct *tsk,
  266. unsigned long idx,
  267. u64 *addr)
  268. {
  269. struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
  270. if (IS_ERR(bp))
  271. return PTR_ERR(bp);
  272. *addr = bp ? bp->attr.bp_addr : 0;
  273. return 0;
  274. }
/*
 * Get the perf_event for slot @idx, creating a disabled placeholder event
 * on first use.  Returns the event or an ERR_PTR.
 */
static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
		struct task_struct *tsk,
		unsigned long idx)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
	if (!bp)
		bp = ptrace_hbp_create(note_type, tsk, idx);
	return bp;
}
  284. static int ptrace_hbp_set_ctrl(unsigned int note_type,
  285. struct task_struct *tsk,
  286. unsigned long idx,
  287. u32 uctrl)
  288. {
  289. int err;
  290. struct perf_event *bp;
  291. struct perf_event_attr attr;
  292. struct arch_hw_breakpoint_ctrl ctrl;
  293. bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
  294. if (IS_ERR(bp)) {
  295. err = PTR_ERR(bp);
  296. return err;
  297. }
  298. attr = bp->attr;
  299. decode_ctrl_reg(uctrl, &ctrl);
  300. err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
  301. if (err)
  302. return err;
  303. return modify_user_hw_breakpoint(bp, &attr);
  304. }
  305. static int ptrace_hbp_set_addr(unsigned int note_type,
  306. struct task_struct *tsk,
  307. unsigned long idx,
  308. u64 addr)
  309. {
  310. int err;
  311. struct perf_event *bp;
  312. struct perf_event_attr attr;
  313. bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
  314. if (IS_ERR(bp)) {
  315. err = PTR_ERR(bp);
  316. return err;
  317. }
  318. attr = bp->attr;
  319. attr.bp_addr = addr;
  320. err = modify_user_hw_breakpoint(bp, &attr);
  321. return err;
  322. }
/* Sizes of the fields making up one user_hwdebug_state debug register. */
#define PTRACE_HBP_ADDR_SZ	sizeof(u64)
#define PTRACE_HBP_CTRL_SZ	sizeof(u32)
#define PTRACE_HBP_PAD_SZ	sizeof(u32)

/*
 * Regset "get" for NT_ARM_HW_BREAK/NT_ARM_HW_WATCH: serialise the
 * thread's debug state into the user_hwdebug_state layout — a resource
 * info word, padding, then one (addr, ctrl, pad) triple per slot.
 * @offset tracks the position within the regset while the copyout
 * helpers consume @pos/@count.
 */
static int hw_break_get(struct task_struct *target,
		const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 info, ctrl;
	u64 addr;
	/* Resource info */
	ret = ptrace_hbp_get_resource_info(note_type, &info);
	if (ret)
		return ret;
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &info, 0,
			sizeof(info));
	if (ret)
		return ret;
	/* Pad */
	offset = offsetof(struct user_hwdebug_state, pad);
	ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, offset,
			offset + PTRACE_HBP_PAD_SZ);
	if (ret)
		return ret;
	/* (address, ctrl) registers */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
		if (ret)
			return ret;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &addr,
				offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;
		ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
		if (ret)
			return ret;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &ctrl,
				offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;
		/* Trailing pad after each (addr, ctrl) pair. */
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
				offset,
				offset + PTRACE_HBP_PAD_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}
	return 0;
}
/*
 * Regset "set" counterpart of hw_break_get(): skip the read-only resource
 * info and padding, then apply each (addr, ctrl) pair to the matching
 * debug slot via the ptrace_hbp_set_* helpers.
 */
static int hw_break_set(struct task_struct *target,
		const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 ctrl;
	u64 addr;
	/* Resource info and pad */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
	if (ret)
		return ret;
	/* (address, ctrl) registers */
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
				offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
				offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;
		/* Skip the pad that follows each (addr, ctrl) pair. */
		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
				offset,
				offset + PTRACE_HBP_PAD_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}
	return 0;
}
  422. #endif /* CONFIG_HAVE_HW_BREAKPOINT */
/*
 * Regset "get" for NT_PRSTATUS: copy the target's saved user register
 * frame straight out of its pt_regs.
 */
static int gpr_get(struct task_struct *target,
		const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
}
  431. static int gpr_set(struct task_struct *target, const struct user_regset *regset,
  432. unsigned int pos, unsigned int count,
  433. const void *kbuf, const void __user *ubuf)
  434. {
  435. int ret;
  436. struct user_pt_regs newregs;
  437. ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
  438. if (ret)
  439. return ret;
  440. if (!valid_user_regs(&newregs))
  441. return -EINVAL;
  442. task_pt_regs(target)->user_regs = newregs;
  443. return 0;
  444. }
  445. /*
  446. * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
  447. */
/*
 * Regset "get" for NT_PRFPREG: copy the thread's saved FP/SIMD state.
 */
static int fpr_get(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	uregs = &target->thread.fpsimd_state.user_fpsimd;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
}
  456. static int fpr_set(struct task_struct *target, const struct user_regset *regset,
  457. unsigned int pos, unsigned int count,
  458. const void *kbuf, const void __user *ubuf)
  459. {
  460. int ret;
  461. struct user_fpsimd_state newstate;
  462. ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1);
  463. if (ret)
  464. return ret;
  465. target->thread.fpsimd_state.user_fpsimd = newstate;
  466. return ret;
  467. }
/*
 * Regset "get" for NT_ARM_TLS: report the thread's TLS pointer (tp_value).
 */
static int tls_get(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	unsigned long *tls = &target->thread.tp_value;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, tls, 0, -1);
}
  475. static int tls_set(struct task_struct *target, const struct user_regset *regset,
  476. unsigned int pos, unsigned int count,
  477. const void *kbuf, const void __user *ubuf)
  478. {
  479. int ret;
  480. unsigned long tls;
  481. ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
  482. if (ret)
  483. return ret;
  484. target->thread.tp_value = tls;
  485. return ret;
  486. }
/* Indices into aarch64_regsets[] below. */
enum aarch64_regset {
	REGSET_GPR,
	REGSET_FPR,
	REGSET_TLS,
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	REGSET_HW_BREAK,
	REGSET_HW_WATCH,
#endif
};
/*
 * Native (AArch64) regsets exposed through ptrace and ELF core dumps.
 * Each entry describes one NT_* note: element count, element size/align
 * and the get/set accessors defined above.
 */
static const struct user_regset aarch64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_pt_regs) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.get = gpr_get,
		.set = gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_fpsimd_state) / sizeof(u32),
		/*
		 * We pretend we have 32-bit registers because the fpsr and
		 * fpcr are 32-bits wide.
		 */
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = fpr_get,
		.set = fpr_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 1,
		.size = sizeof(void *),
		.align = sizeof(void *),
		.get = tls_get,
		.set = tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
#endif
};
/* The regset view returned for native AArch64 tasks. */
static const struct user_regset_view user_aarch64_view = {
	.name = "aarch64", .e_machine = EM_AARCH64,
	.regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets)
};
  548. #ifdef CONFIG_COMPAT
  549. #include <linux/compat.h>
/* Indices into aarch32_regsets[] below. */
enum compat_regset {
	REGSET_COMPAT_GPR,
	REGSET_COMPAT_VFP,
};
  554. static int compat_gpr_get(struct task_struct *target,
  555. const struct user_regset *regset,
  556. unsigned int pos, unsigned int count,
  557. void *kbuf, void __user *ubuf)
  558. {
  559. int ret = 0;
  560. unsigned int i, start, num_regs;
  561. /* Calculate the number of AArch32 registers contained in count */
  562. num_regs = count / regset->size;
  563. /* Convert pos into an register number */
  564. start = pos / regset->size;
  565. if (start + num_regs > regset->n)
  566. return -EIO;
  567. for (i = 0; i < num_regs; ++i) {
  568. unsigned int idx = start + i;
  569. void *reg;
  570. switch (idx) {
  571. case 15:
  572. reg = (void *)&task_pt_regs(target)->pc;
  573. break;
  574. case 16:
  575. reg = (void *)&task_pt_regs(target)->pstate;
  576. break;
  577. case 17:
  578. reg = (void *)&task_pt_regs(target)->orig_x0;
  579. break;
  580. default:
  581. reg = (void *)&task_pt_regs(target)->regs[idx];
  582. }
  583. ret = copy_to_user(ubuf, reg, sizeof(compat_ulong_t));
  584. if (ret)
  585. break;
  586. else
  587. ubuf += sizeof(compat_ulong_t);
  588. }
  589. return ret;
  590. }
  591. static int compat_gpr_set(struct task_struct *target,
  592. const struct user_regset *regset,
  593. unsigned int pos, unsigned int count,
  594. const void *kbuf, const void __user *ubuf)
  595. {
  596. struct pt_regs newregs;
  597. int ret = 0;
  598. unsigned int i, start, num_regs;
  599. /* Calculate the number of AArch32 registers contained in count */
  600. num_regs = count / regset->size;
  601. /* Convert pos into an register number */
  602. start = pos / regset->size;
  603. if (start + num_regs > regset->n)
  604. return -EIO;
  605. newregs = *task_pt_regs(target);
  606. for (i = 0; i < num_regs; ++i) {
  607. unsigned int idx = start + i;
  608. void *reg;
  609. switch (idx) {
  610. case 15:
  611. reg = (void *)&newregs.pc;
  612. break;
  613. case 16:
  614. reg = (void *)&newregs.pstate;
  615. break;
  616. case 17:
  617. reg = (void *)&newregs.orig_x0;
  618. break;
  619. default:
  620. reg = (void *)&newregs.regs[idx];
  621. }
  622. ret = copy_from_user(reg, ubuf, sizeof(compat_ulong_t));
  623. if (ret)
  624. goto out;
  625. else
  626. ubuf += sizeof(compat_ulong_t);
  627. }
  628. if (valid_user_regs(&newregs.user_regs))
  629. *task_pt_regs(target) = newregs;
  630. else
  631. ret = -EINVAL;
  632. out:
  633. return ret;
  634. }
/*
 * Regset "get" for the compat VFP view: the d0-d31 registers are copied
 * straight out of fpsimd_state, and the trailing AArch32 FPSCR word is
 * synthesised from the 64-bit fpsr/fpcr pair.
 *
 * NOTE(review): the put_user() cast lacks a __user annotation, and the
 * final-word path ignores kbuf (kernel-space callers) — presumably only
 * reached via ptrace with a user buffer; verify against the regset core.
 */
static int compat_vfp_get(struct task_struct *target,
		const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;
	int ret;
	uregs = &target->thread.fpsimd_state.user_fpsimd;
	/*
	 * The VFP registers are packed into the fpsimd_state, so they all sit
	 * nicely together for us. We just need to create the fpscr separately.
	 */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
			VFP_STATE_SIZE - sizeof(compat_ulong_t));
	if (count && !ret) {
		/* Combine status and control bits into the AArch32 FPSCR. */
		fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) |
			(uregs->fpcr & VFP_FPSCR_CTRL_MASK);
		ret = put_user(fpscr, (compat_ulong_t *)ubuf);
	}
	return ret;
}
  657. static int compat_vfp_set(struct task_struct *target,
  658. const struct user_regset *regset,
  659. unsigned int pos, unsigned int count,
  660. const void *kbuf, const void __user *ubuf)
  661. {
  662. struct user_fpsimd_state *uregs;
  663. compat_ulong_t fpscr;
  664. int ret;
  665. if (pos + count > VFP_STATE_SIZE)
  666. return -EIO;
  667. uregs = &target->thread.fpsimd_state.user_fpsimd;
  668. ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
  669. VFP_STATE_SIZE - sizeof(compat_ulong_t));
  670. if (count && !ret) {
  671. ret = get_user(fpscr, (compat_ulong_t *)ubuf);
  672. uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK;
  673. uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
  674. }
  675. return ret;
  676. }
/*
 * Compat (AArch32) regsets: the classic ARM GPR frame and the VFP state,
 * exposed to 32-bit debuggers and compat core dumps.
 */
static const struct user_regset aarch32_regsets[] = {
	[REGSET_COMPAT_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_COMPAT_VFP] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.get = compat_vfp_get,
		.set = compat_vfp_set
	},
};

/* The regset view returned for compat (AArch32) tasks. */
static const struct user_regset_view user_aarch32_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
};
  699. int aarch32_break_trap(struct pt_regs *regs)
  700. {
  701. unsigned int instr;
  702. bool bp = false;
  703. void __user *pc = (void __user *)instruction_pointer(regs);
  704. if (compat_thumb_mode(regs)) {
  705. /* get 16-bit Thumb instruction */
  706. get_user(instr, (u16 __user *)pc);
  707. if (instr == AARCH32_BREAK_THUMB2_LO) {
  708. /* get second half of 32-bit Thumb-2 instruction */
  709. get_user(instr, (u16 __user *)(pc + 2));
  710. bp = instr == AARCH32_BREAK_THUMB2_HI;
  711. } else {
  712. bp = instr == AARCH32_BREAK_THUMB;
  713. }
  714. } else {
  715. /* 32-bit ARM instruction */
  716. get_user(instr, (u32 __user *)pc);
  717. bp = (instr & ~0xf0000000) == AARCH32_BREAK_ARM;
  718. }
  719. if (bp)
  720. return ptrace_break(regs);
  721. return 1;
  722. }
/*
 * Compat PTRACE_PEEKUSR: read one word from the legacy ARM "user" area.
 * Magic offsets report text/data segment addresses; offsets within the
 * GPR frame go through the regset; anything else in range reads as 0.
 */
static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
		compat_ulong_t __user *ret)
{
	compat_ulong_t tmp;
	/* The offset must be word-aligned. */
	if (off & 3)
		return -EIO;
	if (off == COMPAT_PT_TEXT_ADDR)
		tmp = tsk->mm->start_code;
	else if (off == COMPAT_PT_DATA_ADDR)
		tmp = tsk->mm->start_data;
	else if (off == COMPAT_PT_TEXT_END_ADDR)
		tmp = tsk->mm->end_code;
	else if (off < sizeof(compat_elf_gregset_t))
		return copy_regset_to_user(tsk, &user_aarch32_view,
				REGSET_COMPAT_GPR, off,
				sizeof(compat_ulong_t), ret);
	else if (off >= COMPAT_USER_SZ)
		return -EIO;
	else
		tmp = 0;
	return put_user(tmp, ret);
}
  745. static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
  746. compat_ulong_t val)
  747. {
  748. int ret;
  749. if (off & 3 || off >= COMPAT_USER_SZ)
  750. return -EIO;
  751. if (off >= sizeof(compat_elf_gregset_t))
  752. return 0;
  753. ret = copy_regset_from_user(tsk, &user_aarch32_view,
  754. REGSET_COMPAT_GPR, off,
  755. sizeof(compat_ulong_t),
  756. &val);
  757. return ret;
  758. }
  759. #ifdef CONFIG_HAVE_HW_BREAKPOINT
  760. /*
  761. * Convert a virtual register number into an index for a thread_info
  762. * breakpoint array. Breakpoints are identified using positive numbers
  763. * whilst watchpoints are negative. The registers are laid out as pairs
  764. * of (address, control), each pair mapping to a unique hw_breakpoint struct.
  765. * Register 0 is reserved for describing resource information.
  766. */
/* Map a virtual debug register number (±1-based (addr, ctrl) pairs) to a
 * slot index: |num| 1/2 -> 0, 3/4 -> 1, ... */
static int compat_ptrace_hbp_num_to_idx(compat_long_t num)
{
	return (abs(num) - 1) >> 1;
}
  771. static int compat_ptrace_hbp_get_resource_info(u32 *kdata)
  772. {
  773. u8 num_brps, num_wrps, debug_arch, wp_len;
  774. u32 reg = 0;
  775. num_brps = hw_breakpoint_slots(TYPE_INST);
  776. num_wrps = hw_breakpoint_slots(TYPE_DATA);
  777. debug_arch = debug_monitors_arch();
  778. wp_len = 8;
  779. reg |= debug_arch;
  780. reg <<= 8;
  781. reg |= wp_len;
  782. reg <<= 8;
  783. reg |= num_wrps;
  784. reg <<= 8;
  785. reg |= num_brps;
  786. *kdata = reg;
  787. return 0;
  788. }
  789. static int compat_ptrace_hbp_get(unsigned int note_type,
  790. struct task_struct *tsk,
  791. compat_long_t num,
  792. u32 *kdata)
  793. {
  794. u64 addr = 0;
  795. u32 ctrl = 0;
  796. int err, idx = compat_ptrace_hbp_num_to_idx(num);;
  797. if (num & 1) {
  798. err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
  799. *kdata = (u32)addr;
  800. } else {
  801. err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl);
  802. *kdata = ctrl;
  803. }
  804. return err;
  805. }
  806. static int compat_ptrace_hbp_set(unsigned int note_type,
  807. struct task_struct *tsk,
  808. compat_long_t num,
  809. u32 *kdata)
  810. {
  811. u64 addr;
  812. u32 ctrl;
  813. int err, idx = compat_ptrace_hbp_num_to_idx(num);
  814. if (num & 1) {
  815. addr = *kdata;
  816. err = ptrace_hbp_set_addr(note_type, tsk, idx, addr);
  817. } else {
  818. ctrl = *kdata;
  819. err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl);
  820. }
  821. return err;
  822. }
/*
 * COMPAT_PTRACE_GETHBPREGS: read one virtual debug register.  Negative
 * numbers select watchpoints, 0 the resource info word, positive numbers
 * breakpoints.  KERNEL_DS is entered around the helpers because the
 * regset plumbing they share may perform user-style accesses on the
 * kernel buffer.
 */
static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
		compat_ulong_t __user *data)
{
	int ret;
	u32 kdata;
	mm_segment_t old_fs = get_fs();
	set_fs(KERNEL_DS);
	/* Watchpoint */
	if (num < 0) {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
	/* Resource info */
	} else if (num == 0) {
		ret = compat_ptrace_hbp_get_resource_info(&kdata);
	/* Breakpoint */
	} else {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
	}
	set_fs(old_fs);
	if (!ret)
		ret = put_user(kdata, data);
	return ret;
}
/*
 * COMPAT_PTRACE_SETHBPREGS: write one virtual debug register.  Register 0
 * (resource info) is read-only and silently accepted.  The user value is
 * fetched before switching to KERNEL_DS for the shared helpers.
 */
static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
		compat_ulong_t __user *data)
{
	int ret;
	u32 kdata = 0;
	mm_segment_t old_fs = get_fs();
	if (num == 0)
		return 0;
	ret = get_user(kdata, data);
	if (ret)
		return ret;
	set_fs(KERNEL_DS);
	if (num < 0)
		ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
	else
		ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);
	set_fs(old_fs);
	return ret;
}
  864. #endif /* CONFIG_HAVE_HW_BREAKPOINT */
/*
 * Dispatch for ptrace requests issued by/against compat (AArch32) tasks.
 * Arch-specific legacy requests are handled here; everything else falls
 * through to the generic compat_ptrace_request().
 */
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;
	switch (request) {
		/* Legacy word-at-a-time access to the "user" area. */
		case PTRACE_PEEKUSR:
			ret = compat_ptrace_read_user(child, addr, datap);
			break;
		case PTRACE_POKEUSR:
			ret = compat_ptrace_write_user(child, addr, data);
			break;
		/* Whole GPR frame, via the compat regset. */
		case COMPAT_PTRACE_GETREGS:
			ret = copy_regset_to_user(child,
					&user_aarch32_view,
					REGSET_COMPAT_GPR,
					0, sizeof(compat_elf_gregset_t),
					datap);
			break;
		case COMPAT_PTRACE_SETREGS:
			ret = copy_regset_from_user(child,
					&user_aarch32_view,
					REGSET_COMPAT_GPR,
					0, sizeof(compat_elf_gregset_t),
					datap);
			break;
		case COMPAT_PTRACE_GET_THREAD_AREA:
			ret = put_user((compat_ulong_t)child->thread.tp_value,
					(compat_ulong_t __user *)datap);
			break;
		case COMPAT_PTRACE_SET_SYSCALL:
			task_pt_regs(child)->syscallno = data;
			ret = 0;
			break;
		/* VFP state, via the compat regset. */
		case COMPAT_PTRACE_GETVFPREGS:
			ret = copy_regset_to_user(child,
					&user_aarch32_view,
					REGSET_COMPAT_VFP,
					0, VFP_STATE_SIZE,
					datap);
			break;
		case COMPAT_PTRACE_SETVFPREGS:
			ret = copy_regset_from_user(child,
					&user_aarch32_view,
					REGSET_COMPAT_VFP,
					0, VFP_STATE_SIZE,
					datap);
			break;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
		case COMPAT_PTRACE_GETHBPREGS:
			ret = compat_ptrace_gethbpregs(child, addr, datap);
			break;
		case COMPAT_PTRACE_SETHBPREGS:
			ret = compat_ptrace_sethbpregs(child, addr, datap);
			break;
#endif
		default:
			ret = compat_ptrace_request(child, request, addr,
					data);
			break;
	}
	return ret;
}
  930. #endif /* CONFIG_COMPAT */
/*
 * Select the regset view matching the tracee's personality: the AArch32
 * view for compat threads, the native AArch64 view otherwise.
 */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	if (is_compat_thread(task_thread_info(task)))
		return &user_aarch32_view;
#endif
	return &user_aarch64_view;
}
/*
 * Native ptrace entry point: no arch-specific requests beyond the
 * regsets, so defer everything to the generic handler.
 */
long arch_ptrace(struct task_struct *child, long request,
		unsigned long addr, unsigned long data)
{
	return ptrace_request(child, request, addr, data);
}
/*
 * Install the BRK debug-fault handler at boot so userspace breakpoint
 * instructions are turned into SIGTRAPs.
 */
static int __init ptrace_break_init(void)
{
	hook_debug_fault_code(DBG_ESR_EVT_BRK, arm64_break_trap, SIGTRAP,
			TRAP_BRKPT, "ptrace BRK handler");
	return 0;
}
core_initcall(ptrace_break_init);
/*
 * Syscall entry/exit tracing hook (@dir: 0 = entry, 1 = exit).  A scratch
 * register is temporarily clobbered so the tracer can distinguish entry
 * from exit, then restored before returning the (possibly rewritten)
 * syscall number.  The save/clobber/restore ordering around the
 * tracehook calls is load-bearing — do not reorder.
 */
asmlinkage int syscall_trace(int dir, struct pt_regs *regs)
{
	unsigned long saved_reg;
	if (!test_thread_flag(TIF_SYSCALL_TRACE))
		return regs->syscallno;
	if (is_compat_task()) {
		/* AArch32 uses ip (r12) for scratch */
		saved_reg = regs->regs[12];
		regs->regs[12] = dir;
	} else {
		/*
		 * Save X7. X7 is used to denote syscall entry/exit:
		 * X7 = 0 -> entry, = 1 -> exit
		 */
		saved_reg = regs->regs[7];
		regs->regs[7] = dir;
	}
	if (dir)
		tracehook_report_syscall_exit(regs, 0);
	else if (tracehook_report_syscall_entry(regs))
		/* Tracer denied the syscall: poison the syscall number. */
		regs->syscallno = ~0UL;
	if (is_compat_task())
		regs->regs[12] = saved_reg;
	else
		regs->regs[7] = saved_reg;
	return regs->syscallno;
}