ptrace.c

/*
 * Based on arch/arm/kernel/ptrace.c
 *
 * By Ross Biro 1/23/92
 * edited by Linus Torvalds
 * ARM modifications Copyright (C) 2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/elf.h>

#include <asm/compat.h>
#include <asm/debug-monitors.h>
#include <asm/pgtable.h>
#include <asm/traps.h>
#include <asm/system_misc.h>

/*
 * TODO: does not yet catch signals sent when the child dies.
 * in exit.c or in signal.c.
 */

/*
 * Called by kernel/ptrace.c when detaching..
 */
void ptrace_disable(struct task_struct *child)
{
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
 * Handle hitting a HW-breakpoint.
 */
static void ptrace_hbptriggered(struct perf_event *bp,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
	siginfo_t info = {
		.si_signo = SIGTRAP,
		.si_errno = 0,
		.si_code = TRAP_HWBKPT,
		.si_addr = (void __user *)(bkpt->trigger),
	};

#ifdef CONFIG_COMPAT
	int i;

	if (!is_compat_task())
		goto send_sig;
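
	/*
	 * si_errno tells a 32-bit debugger which slot fired: breakpoints
	 * are reported as positive odd numbers and watchpoints as negative
	 * ones, matching the AArch32 PTRACE_GETHBPREGS register numbering
	 * handled further down in this file.
	 */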
	for (i = 0; i < ARM_MAX_BRP; ++i) {
		if (current->thread.debug.hbp_break[i] == bp) {
			info.si_errno = (i << 1) + 1;
			break;
		}
	}

	for (i = 0; i < ARM_MAX_WRP; ++i) {
		if (current->thread.debug.hbp_watch[i] == bp) {
			info.si_errno = -((i << 1) + 1);
			break;
		}
	}

send_sig:
#endif
	force_sig_info(SIGTRAP, &info, current);
}

/*
 * Unregister breakpoints from this task and reset the pointers in
 * the thread_struct.
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < ARM_MAX_BRP; i++) {
		if (t->debug.hbp_break[i]) {
			unregister_hw_breakpoint(t->debug.hbp_break[i]);
			t->debug.hbp_break[i] = NULL;
		}
	}

	for (i = 0; i < ARM_MAX_WRP; i++) {
		if (t->debug.hbp_watch[i]) {
			unregister_hw_breakpoint(t->debug.hbp_watch[i]);
			t->debug.hbp_watch[i] = NULL;
		}
	}
}

void ptrace_hw_copy_thread(struct task_struct *tsk)
{
	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
}

static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
					       struct task_struct *tsk,
					       unsigned long idx)
{
	struct perf_event *bp = ERR_PTR(-EINVAL);

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx < ARM_MAX_BRP)
			bp = tsk->thread.debug.hbp_break[idx];
		break;
	case NT_ARM_HW_WATCH:
		if (idx < ARM_MAX_WRP)
			bp = tsk->thread.debug.hbp_watch[idx];
		break;
	}

	return bp;
}

static int ptrace_hbp_set_event(unsigned int note_type,
				struct task_struct *tsk,
				unsigned long idx,
				struct perf_event *bp)
{
	int err = -EINVAL;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx < ARM_MAX_BRP) {
			tsk->thread.debug.hbp_break[idx] = bp;
			err = 0;
		}
		break;
	case NT_ARM_HW_WATCH:
		if (idx < ARM_MAX_WRP) {
			tsk->thread.debug.hbp_watch[idx] = bp;
			err = 0;
		}
		break;
	}

	return err;
}

static struct perf_event *ptrace_hbp_create(unsigned int note_type,
					    struct task_struct *tsk,
					    unsigned long idx)
{
	struct perf_event *bp;
	struct perf_event_attr attr;
	int err, type;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		type = HW_BREAKPOINT_X;
		break;
	case NT_ARM_HW_WATCH:
		type = HW_BREAKPOINT_RW;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	ptrace_breakpoint_init(&attr);

	/*
	 * Initialise fields to sane defaults
	 * (i.e. values that will pass validation).
	 */
	attr.bp_addr = 0;
	attr.bp_len = HW_BREAKPOINT_LEN_4;
	attr.bp_type = type;
	attr.disabled = 1;

	bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
	if (IS_ERR(bp))
		return bp;

	err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
	if (err)
		return ERR_PTR(err);

	return bp;
}

static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
				     struct arch_hw_breakpoint_ctrl ctrl,
				     struct perf_event_attr *attr)
{
	int err, len, type, disabled = !ctrl.enabled;

	if (disabled) {
		len = 0;
		type = HW_BREAKPOINT_EMPTY;
	} else {
		err = arch_bp_generic_fields(ctrl, &len, &type);
		if (err)
			return err;

		switch (note_type) {
		case NT_ARM_HW_BREAK:
			if ((type & HW_BREAKPOINT_X) != type)
				return -EINVAL;
			break;
		case NT_ARM_HW_WATCH:
			if ((type & HW_BREAKPOINT_RW) != type)
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}
	}

	attr->bp_len = len;
	attr->bp_type = type;
	attr->disabled = disabled;

	return 0;
}
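
/*
 * The resource-information word for NT_ARM_HW_BREAK/NT_ARM_HW_WATCH packs
 * the debug architecture version into bits [15:8] and the number of
 * available slots into bits [7:0].
 */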
static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info)
{
	u8 num;
	u32 reg = 0;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		num = hw_breakpoint_slots(TYPE_INST);
		break;
	case NT_ARM_HW_WATCH:
		num = hw_breakpoint_slots(TYPE_DATA);
		break;
	default:
		return -EINVAL;
	}

	reg |= debug_monitors_arch();
	reg <<= 8;
	reg |= num;

	*info = reg;
	return 0;
}

static int ptrace_hbp_get_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 *ctrl)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
	return 0;
}

static int ptrace_hbp_get_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 *addr)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*addr = bp ? bp->attr.bp_addr : 0;
	return 0;
}

static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
							struct task_struct *tsk,
							unsigned long idx)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (!bp)
		bp = ptrace_hbp_create(note_type, tsk, idx);

	return bp;
}

static int ptrace_hbp_set_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 uctrl)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint_ctrl ctrl;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	decode_ctrl_reg(uctrl, &ctrl);
	err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
	if (err)
		return err;

	return modify_user_hw_breakpoint(bp, &attr);
}

static int ptrace_hbp_set_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 addr)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	attr.bp_addr = addr;
	err = modify_user_hw_breakpoint(bp, &attr);
	return err;
}

#define PTRACE_HBP_ADDR_SZ	sizeof(u64)
#define PTRACE_HBP_CTRL_SZ	sizeof(u32)
#define PTRACE_HBP_PAD_SZ	sizeof(u32)
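
/*
 * hw_break_get() and hw_break_set() marshal the NT_ARM_HW_BREAK and
 * NT_ARM_HW_WATCH regsets, which are laid out as:
 *
 *	u32 info;	(debug architecture and slot count; ignored on write)
 *	u32 pad;
 *	struct {
 *		u64 addr;
 *		u32 ctrl;
 *		u32 pad;
 *	} dbg_regs[];	(one entry per hardware slot)
 */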
static int hw_break_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 info, ctrl;
	u64 addr;

	/* Resource info */
	ret = ptrace_hbp_get_resource_info(note_type, &info);
	if (ret)
		return ret;

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &info, 0,
				  sizeof(info));
	if (ret)
		return ret;

	/* Pad */
	offset = offsetof(struct user_hwdebug_state, pad);
	ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, offset,
				       offset + PTRACE_HBP_PAD_SZ);
	if (ret)
		return ret;

	/* (address, ctrl) registers */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
		if (ret)
			return ret;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &addr,
					  offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
		if (ret)
			return ret;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &ctrl,
					  offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       offset,
					       offset + PTRACE_HBP_PAD_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}

static int hw_break_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 ctrl;
	u64 addr;

	/* Resource info and pad */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
	if (ret)
		return ret;

	/* (address, ctrl) registers */
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
					 offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
					 offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						offset,
						offset + PTRACE_HBP_PAD_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
}

static int gpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_pt_regs newregs;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
	if (ret)
		return ret;

	if (!valid_user_regs(&newregs))
		return -EINVAL;

	task_pt_regs(target)->user_regs = newregs;
	return 0;
}

/*
 * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
 */
static int fpr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	uregs = &target->thread.fpsimd_state.user_fpsimd;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
}

static int fpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_fpsimd_state newstate;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1);
	if (ret)
		return ret;

	target->thread.fpsimd_state.user_fpsimd = newstate;
	return ret;
}

static int tls_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	unsigned long *tls = &target->thread.tp_value;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, tls, 0, -1);
}

static int tls_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	unsigned long tls;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
	if (ret)
		return ret;

	target->thread.tp_value = tls;
	return ret;
}
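
/*
 * Native regsets, exposed to debuggers via PTRACE_GETREGSET and
 * PTRACE_SETREGSET and used for the ELF notes written to core dumps.
 */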
enum aarch64_regset {
	REGSET_GPR,
	REGSET_FPR,
	REGSET_TLS,
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	REGSET_HW_BREAK,
	REGSET_HW_WATCH,
#endif
};

static const struct user_regset aarch64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_pt_regs) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.get = gpr_get,
		.set = gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_fpsimd_state) / sizeof(u32),
		/*
		 * We pretend we have 32-bit registers because the fpsr and
		 * fpcr are 32-bits wide.
		 */
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = fpr_get,
		.set = fpr_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 1,
		.size = sizeof(void *),
		.align = sizeof(void *),
		.get = tls_get,
		.set = tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
#endif
};

static const struct user_regset_view user_aarch64_view = {
	.name = "aarch64", .e_machine = EM_AARCH64,
	.regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets)
};

#ifdef CONFIG_COMPAT
#include <linux/compat.h>

enum compat_regset {
	REGSET_COMPAT_GPR,
	REGSET_COMPAT_VFP,
};
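
/*
 * AArch32 general-purpose registers r0-r14 are aliased to x0-x14, while
 * indices 15, 16 and 17 of the compat gregset (pc, cpsr and orig_r0) map
 * to pc, pstate and orig_x0.
 */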
static int compat_gpr_get(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  void *kbuf, void __user *ubuf)
{
	int ret = 0;
	unsigned int i, start, num_regs;

	/* Calculate the number of AArch32 registers contained in count */
	num_regs = count / regset->size;

	/* Convert pos into a register number */
	start = pos / regset->size;

	if (start + num_regs > regset->n)
		return -EIO;

	for (i = 0; i < num_regs; ++i) {
		unsigned int idx = start + i;
		compat_ulong_t reg;

		switch (idx) {
		case 15:
			reg = task_pt_regs(target)->pc;
			break;
		case 16:
			reg = task_pt_regs(target)->pstate;
			break;
		case 17:
			reg = task_pt_regs(target)->orig_x0;
			break;
		default:
			reg = task_pt_regs(target)->regs[idx];
		}

		ret = copy_to_user(ubuf, &reg, sizeof(reg));
		if (ret)
			break;

		ubuf += sizeof(reg);
	}

	return ret;
}

static int compat_gpr_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct pt_regs newregs;
	int ret = 0;
	unsigned int i, start, num_regs;

	/* Calculate the number of AArch32 registers contained in count */
	num_regs = count / regset->size;

	/* Convert pos into a register number */
	start = pos / regset->size;

	if (start + num_regs > regset->n)
		return -EIO;

	newregs = *task_pt_regs(target);

	for (i = 0; i < num_regs; ++i) {
		unsigned int idx = start + i;
		compat_ulong_t reg;

		ret = copy_from_user(&reg, ubuf, sizeof(reg));
		if (ret)
			return ret;

		ubuf += sizeof(reg);

		switch (idx) {
		case 15:
			newregs.pc = reg;
			break;
		case 16:
			newregs.pstate = reg;
			break;
		case 17:
			newregs.orig_x0 = reg;
			break;
		default:
			newregs.regs[idx] = reg;
		}
	}

	if (valid_user_regs(&newregs.user_regs))
		*task_pt_regs(target) = newregs;
	else
		ret = -EINVAL;

	return ret;
}

static int compat_vfp_get(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  void *kbuf, void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;
	int ret;

	uregs = &target->thread.fpsimd_state.user_fpsimd;

	/*
	 * The VFP registers are packed into the fpsimd_state, so they all sit
	 * nicely together for us. We just need to create the fpscr separately.
	 */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
				  VFP_STATE_SIZE - sizeof(compat_ulong_t));

	if (count && !ret) {
		fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) |
			(uregs->fpcr & VFP_FPSCR_CTRL_MASK);
		ret = put_user(fpscr, (compat_ulong_t __user *)ubuf);
	}

	return ret;
}

static int compat_vfp_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;
	int ret;

	if (pos + count > VFP_STATE_SIZE)
		return -EIO;

	uregs = &target->thread.fpsimd_state.user_fpsimd;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
				 VFP_STATE_SIZE - sizeof(compat_ulong_t));

	if (count && !ret) {
		ret = get_user(fpscr, (compat_ulong_t __user *)ubuf);
		uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK;
		uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
	}

	return ret;
}

static const struct user_regset aarch32_regsets[] = {
	[REGSET_COMPAT_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_COMPAT_VFP] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.get = compat_vfp_get,
		.set = compat_vfp_set
	},
};

static const struct user_regset_view user_aarch32_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
};
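
/*
 * PTRACE_PEEKUSR/PTRACE_POKEUSR compatibility: word offsets within the
 * compat gregset are routed through the GPR regset, while reads of the
 * magic TEXT/DATA/TEXT_END offsets report the traced task's mm layout.
 */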
static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
				   compat_ulong_t __user *ret)
{
	compat_ulong_t tmp;

	if (off & 3)
		return -EIO;

	if (off == COMPAT_PT_TEXT_ADDR)
		tmp = tsk->mm->start_code;
	else if (off == COMPAT_PT_DATA_ADDR)
		tmp = tsk->mm->start_data;
	else if (off == COMPAT_PT_TEXT_END_ADDR)
		tmp = tsk->mm->end_code;
	else if (off < sizeof(compat_elf_gregset_t))
		return copy_regset_to_user(tsk, &user_aarch32_view,
					   REGSET_COMPAT_GPR, off,
					   sizeof(compat_ulong_t), ret);
	else if (off >= COMPAT_USER_SZ)
		return -EIO;
	else
		tmp = 0;

	return put_user(tmp, ret);
}

static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
				    compat_ulong_t val)
{
	int ret;

	if (off & 3 || off >= COMPAT_USER_SZ)
		return -EIO;

	if (off >= sizeof(compat_elf_gregset_t))
		return 0;

	ret = copy_regset_from_user(tsk, &user_aarch32_view,
				    REGSET_COMPAT_GPR, off,
				    sizeof(compat_ulong_t),
				    &val);
	return ret;
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT

/*
 * Convert a virtual register number into an index for a thread_info
 * breakpoint array. Breakpoints are identified using positive numbers
 * whilst watchpoints are negative. The registers are laid out as pairs
 * of (address, control), each pair mapping to a unique hw_breakpoint struct.
 * Register 0 is reserved for describing resource information.
 */
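/*
 * For example, AArch32 registers 1 and 2 (the first breakpoint's address
 * and control) both map to slot 0, registers 3 and 4 to slot 1, and
 * registers -1 and -2 to watchpoint slot 0.
 */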
static int compat_ptrace_hbp_num_to_idx(compat_long_t num)
{
	return (abs(num) - 1) >> 1;
}
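
/*
 * The AArch32 resource-information word packs, from least to most
 * significant byte: the number of breakpoint slots, the number of
 * watchpoint slots, the maximum watchpoint length and the debug
 * architecture version.
 */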
static int compat_ptrace_hbp_get_resource_info(u32 *kdata)
{
	u8 num_brps, num_wrps, debug_arch, wp_len;
	u32 reg = 0;

	num_brps = hw_breakpoint_slots(TYPE_INST);
	num_wrps = hw_breakpoint_slots(TYPE_DATA);

	debug_arch = debug_monitors_arch();
	wp_len = 8;
	reg |= debug_arch;
	reg <<= 8;
	reg |= wp_len;
	reg <<= 8;
	reg |= num_wrps;
	reg <<= 8;
	reg |= num_brps;

	*kdata = reg;
	return 0;
}

static int compat_ptrace_hbp_get(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr = 0;
	u32 ctrl = 0;
	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
		*kdata = (u32)addr;
	} else {
		err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl);
		*kdata = ctrl;
	}

	return err;
}

static int compat_ptrace_hbp_set(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr;
	u32 ctrl;
	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		addr = *kdata;
		err = ptrace_hbp_set_addr(note_type, tsk, idx, addr);
	} else {
		ctrl = *kdata;
		err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl);
	}

	return err;
}

static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	/* Watchpoint */
	if (num < 0) {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
	/* Resource info */
	} else if (num == 0) {
		ret = compat_ptrace_hbp_get_resource_info(&kdata);
	/* Breakpoint */
	} else {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
	}
	set_fs(old_fs);

	if (!ret)
		ret = put_user(kdata, data);

	return ret;
}

static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata = 0;
	mm_segment_t old_fs = get_fs();

	if (num == 0)
		return 0;

	ret = get_user(kdata, data);
	if (ret)
		return ret;

	set_fs(KERNEL_DS);
	if (num < 0)
		ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
	else
		ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);
	set_fs(old_fs);

	return ret;
}
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = compat_ptrace_read_user(child, addr, datap);
		break;

	case PTRACE_POKEUSR:
		ret = compat_ptrace_write_user(child, addr, data);
		break;

	case COMPAT_PTRACE_GETREGS:
		ret = copy_regset_to_user(child,
					  &user_aarch32_view,
					  REGSET_COMPAT_GPR,
					  0, sizeof(compat_elf_gregset_t),
					  datap);
		break;

	case COMPAT_PTRACE_SETREGS:
		ret = copy_regset_from_user(child,
					    &user_aarch32_view,
					    REGSET_COMPAT_GPR,
					    0, sizeof(compat_elf_gregset_t),
					    datap);
		break;

	case COMPAT_PTRACE_GET_THREAD_AREA:
		ret = put_user((compat_ulong_t)child->thread.tp_value,
			       (compat_ulong_t __user *)datap);
		break;

	case COMPAT_PTRACE_SET_SYSCALL:
		task_pt_regs(child)->syscallno = data;
		ret = 0;
		break;

	case COMPAT_PTRACE_GETVFPREGS:
		ret = copy_regset_to_user(child,
					  &user_aarch32_view,
					  REGSET_COMPAT_VFP,
					  0, VFP_STATE_SIZE,
					  datap);
		break;

	case COMPAT_PTRACE_SETVFPREGS:
		ret = copy_regset_from_user(child,
					    &user_aarch32_view,
					    REGSET_COMPAT_VFP,
					    0, VFP_STATE_SIZE,
					    datap);
		break;

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	case COMPAT_PTRACE_GETHBPREGS:
		ret = compat_ptrace_gethbpregs(child, addr, datap);
		break;

	case COMPAT_PTRACE_SETHBPREGS:
		ret = compat_ptrace_sethbpregs(child, addr, datap);
		break;
#endif

	default:
		ret = compat_ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
#endif /* CONFIG_COMPAT */

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	if (is_compat_thread(task_thread_info(task)))
		return &user_aarch32_view;
#endif
	return &user_aarch64_view;
}

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	return ptrace_request(child, request, addr, data);
}
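
/*
 * Called on syscall entry (dir == 0) and exit (dir != 0). Returns the
 * syscall number to invoke; if tracehook_report_syscall_entry() fails,
 * the syscall number is replaced with -1 first.
 */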
asmlinkage int syscall_trace(int dir, struct pt_regs *regs)
{
	unsigned long saved_reg;

	if (!test_thread_flag(TIF_SYSCALL_TRACE))
		return regs->syscallno;

	if (is_compat_task()) {
		/* AArch32 uses ip (r12) for scratch */
		saved_reg = regs->regs[12];
		regs->regs[12] = dir;
	} else {
		/*
		 * Save X7. X7 is used to denote syscall entry/exit:
		 *   X7 = 0 -> entry, = 1 -> exit
		 */
		saved_reg = regs->regs[7];
		regs->regs[7] = dir;
	}

	if (dir)
		tracehook_report_syscall_exit(regs, 0);
	else if (tracehook_report_syscall_entry(regs))
		regs->syscallno = ~0UL;

	if (is_compat_task())
		regs->regs[12] = saved_reg;
	else
		regs->regs[7] = saved_reg;

	return regs->syscallno;
}