ptrace.c 38 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531
  1. /* By Ross Biro 1/23/92 */
  2. /*
  3. * Pentium III FXSR, SSE support
  4. * Gareth Hughes <gareth@valinux.com>, May 2000
  5. */
  6. #include <linux/kernel.h>
  7. #include <linux/sched.h>
  8. #include <linux/mm.h>
  9. #include <linux/smp.h>
  10. #include <linux/errno.h>
  11. #include <linux/slab.h>
  12. #include <linux/ptrace.h>
  13. #include <linux/regset.h>
  14. #include <linux/tracehook.h>
  15. #include <linux/user.h>
  16. #include <linux/elf.h>
  17. #include <linux/security.h>
  18. #include <linux/audit.h>
  19. #include <linux/seccomp.h>
  20. #include <linux/signal.h>
  21. #include <linux/perf_event.h>
  22. #include <linux/hw_breakpoint.h>
  23. #include <linux/rcupdate.h>
  24. #include <asm/uaccess.h>
  25. #include <asm/pgtable.h>
  26. #include <asm/processor.h>
  27. #include <asm/i387.h>
  28. #include <asm/fpu-internal.h>
  29. #include <asm/debugreg.h>
  30. #include <asm/ldt.h>
  31. #include <asm/desc.h>
  32. #include <asm/prctl.h>
  33. #include <asm/proto.h>
  34. #include <asm/hw_breakpoint.h>
  35. #include <asm/traps.h>
  36. #include "tls.h"
  37. #define CREATE_TRACE_POINTS
  38. #include <trace/events/syscalls.h>
/*
 * Indices of the user_regset views exported for this arch.
 * REGSET_IOPERM64 deliberately shares REGSET_XFP's slot — presumably the
 * 64-bit view has no XFP regset so the index is reused; the regset tables
 * themselves are outside this chunk, so confirm there.
 */
enum x86_regset {
	REGSET_GENERAL,
	REGSET_FP,
	REGSET_XFP,
	REGSET_IOPERM64 = REGSET_XFP,
	REGSET_XSTATE,
	REGSET_TLS,
	REGSET_IOPERM32,
};
/* Maps a register's symbolic name to its byte offset in struct pt_regs. */
struct pt_regs_offset {
	const char *name;	/* register name, e.g. "ip" */
	int offset;		/* offsetof(struct pt_regs, <name>) */
};

/* Build one table entry from a pt_regs member name. */
#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
/* Sentinel terminating regoffset_table (name == NULL). */
#define REG_OFFSET_END {.name = NULL, .offset = 0}
/*
 * Name -> offset table for every pt_regs register, terminated by
 * REG_OFFSET_END.  Consumed by regs_query_register_offset() and
 * regs_query_register_name() below.
 */
static const struct pt_regs_offset regoffset_table[] = {
#ifdef CONFIG_X86_64
	REG_OFFSET_NAME(r15),
	REG_OFFSET_NAME(r14),
	REG_OFFSET_NAME(r13),
	REG_OFFSET_NAME(r12),
	REG_OFFSET_NAME(r11),
	REG_OFFSET_NAME(r10),
	REG_OFFSET_NAME(r9),
	REG_OFFSET_NAME(r8),
#endif
	REG_OFFSET_NAME(bx),
	REG_OFFSET_NAME(cx),
	REG_OFFSET_NAME(dx),
	REG_OFFSET_NAME(si),
	REG_OFFSET_NAME(di),
	REG_OFFSET_NAME(bp),
	REG_OFFSET_NAME(ax),
#ifdef CONFIG_X86_32
	/* ds/es/fs/gs live in pt_regs only on 32-bit. */
	REG_OFFSET_NAME(ds),
	REG_OFFSET_NAME(es),
	REG_OFFSET_NAME(fs),
	REG_OFFSET_NAME(gs),
#endif
	REG_OFFSET_NAME(orig_ax),
	REG_OFFSET_NAME(ip),
	REG_OFFSET_NAME(cs),
	REG_OFFSET_NAME(flags),
	REG_OFFSET_NAME(sp),
	REG_OFFSET_NAME(ss),
	REG_OFFSET_END,
};
  86. /**
  87. * regs_query_register_offset() - query register offset from its name
  88. * @name: the name of a register
  89. *
  90. * regs_query_register_offset() returns the offset of a register in struct
  91. * pt_regs from its name. If the name is invalid, this returns -EINVAL;
  92. */
  93. int regs_query_register_offset(const char *name)
  94. {
  95. const struct pt_regs_offset *roff;
  96. for (roff = regoffset_table; roff->name != NULL; roff++)
  97. if (!strcmp(roff->name, name))
  98. return roff->offset;
  99. return -EINVAL;
  100. }
  101. /**
  102. * regs_query_register_name() - query register name from its offset
  103. * @offset: the offset of a register in struct pt_regs.
  104. *
  105. * regs_query_register_name() returns the name of a register from its
  106. * offset in struct pt_regs. If the @offset is invalid, this returns NULL;
  107. */
  108. const char *regs_query_register_name(unsigned int offset)
  109. {
  110. const struct pt_regs_offset *roff;
  111. for (roff = regoffset_table; roff->name != NULL; roff++)
  112. if (roff->offset == offset)
  113. return roff->name;
  114. return NULL;
  115. }
/*
 * pt_regs offsets of the registers carrying function-call arguments,
 * indexed by argument position, per each ABI's calling convention.
 * NOTE(review): nothing in this chunk references this table — verify it
 * is still used elsewhere before assuming it is live.
 */
static const int arg_offs_table[] = {
#ifdef CONFIG_X86_32
	[0] = offsetof(struct pt_regs, ax),
	[1] = offsetof(struct pt_regs, dx),
	[2] = offsetof(struct pt_regs, cx)
#else /* CONFIG_X86_64 */
	[0] = offsetof(struct pt_regs, di),
	[1] = offsetof(struct pt_regs, si),
	[2] = offsetof(struct pt_regs, dx),
	[3] = offsetof(struct pt_regs, cx),
	[4] = offsetof(struct pt_regs, r8),
	[5] = offsetof(struct pt_regs, r9)
#endif
};
/*
 * does not yet catch signals sent when the child dies.
 * in exit.c or in signal.c.
 */

/*
 * Determines which flags the user has access to [1 = access, 0 = no access].
 */
#define FLAG_MASK_32		((unsigned long)			\
				 (X86_EFLAGS_CF | X86_EFLAGS_PF |	\
				  X86_EFLAGS_AF | X86_EFLAGS_ZF |	\
				  X86_EFLAGS_SF | X86_EFLAGS_TF |	\
				  X86_EFLAGS_DF | X86_EFLAGS_OF |	\
				  X86_EFLAGS_RF | X86_EFLAGS_AC))

/*
 * Determines whether a value may be installed in a segment register.
 * The null selector (0) is always acceptable; any non-null selector
 * must carry user RPL in its low bits.
 */
static inline bool invalid_selector(u16 value)
{
	return unlikely(value != 0 && (value & SEGMENT_RPL_MASK) != USER_RPL);
}
#ifdef CONFIG_X86_32

#define FLAG_MASK		FLAG_MASK_32

/*
 * Convert a PTRACE_PEEKUSR/POKEUSR byte offset into a pointer into
 * pt_regs.  Slots are 4 bytes apart on 32-bit, hence regno >> 2.
 * Relies on bx being the first pt_regs member (checked at build time).
 */
static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
{
	BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
	return &regs->bx + (regno >> 2);
}
/*
 * Read a segment register for the traced task (32-bit).  Most selectors
 * come straight out of pt_regs; %gs is handled specially via the
 * get_user_gs/task_user_gs helpers rather than pt_regs.
 */
static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
	/*
	 * Returning the value truncates it to 16 bits.
	 */
	unsigned int retval;

	if (offset != offsetof(struct user_regs_struct, gs))
		retval = *pt_regs_access(task_pt_regs(task), offset);
	else {
		/* %gs: use the live value for current, saved copy otherwise. */
		if (task == current)
			retval = get_user_gs(task_pt_regs(task));
		else
			retval = task_user_gs(task);
	}
	return retval;
}
/*
 * Install a segment selector for the traced task (32-bit).
 * Returns 0 on success, -EIO for a selector that may not be installed.
 */
static int set_segment_reg(struct task_struct *task,
			   unsigned long offset, u16 value)
{
	/*
	 * The value argument was already truncated to 16 bits.
	 */
	if (invalid_selector(value))
		return -EIO;

	/*
	 * For %cs and %ss we cannot permit a null selector.
	 * We can permit a bogus selector as long as it has USER_RPL.
	 * Null selectors are fine for other segment registers, but
	 * we will never get back to user mode with invalid %cs or %ss
	 * and will take the trap in iret instead. Much code relies
	 * on user_mode() to distinguish a user trap frame (which can
	 * safely use invalid selectors) from a kernel trap frame.
	 */
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ss):
		if (unlikely(value == 0))
			return -EIO;
		/* Deliberate fallthrough: non-null cs/ss are stored below. */
	default:
		*pt_regs_access(task_pt_regs(task), offset) = value;
		break;

	case offsetof(struct user_regs_struct, gs):
		/* %gs is not kept in pt_regs here — see get_segment_reg(). */
		if (task == current)
			set_user_gs(task_pt_regs(task), value);
		else
			task_user_gs(task) = value;
	}

	return 0;
}
#else /* CONFIG_X86_64 */

#define FLAG_MASK		(FLAG_MASK_32 | X86_EFLAGS_NT)

/*
 * Convert a byte offset into a pointer into pt_regs.  On 64-bit, slots
 * are sizeof(regs->r15) bytes apart and r15 is the first member
 * (checked at build time).
 */
static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long offset)
{
	BUILD_BUG_ON(offsetof(struct pt_regs, r15) != 0);
	return &regs->r15 + (offset / sizeof(regs->r15));
}
/*
 * Read a segment register for the traced task (64-bit).  fs/gs/ds/es are
 * read from the live registers for current (pt_regs does not hold them
 * here), or from the per-thread saved copies for a stopped tracee; cs/ss
 * fall through to pt_regs.
 */
static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
	/*
	 * Returning the value truncates it to 16 bits.
	 */
	unsigned int seg;

	switch (offset) {
	case offsetof(struct user_regs_struct, fs):
		if (task == current) {
			/* Older gas can't assemble movq %?s,%r?? */
			asm("movl %%fs,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.fsindex;
	case offsetof(struct user_regs_struct, gs):
		if (task == current) {
			asm("movl %%gs,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.gsindex;
	case offsetof(struct user_regs_struct, ds):
		if (task == current) {
			asm("movl %%ds,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.ds;
	case offsetof(struct user_regs_struct, es):
		if (task == current) {
			asm("movl %%es,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.es;

	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ss):
		break;
	}
	return *pt_regs_access(task_pt_regs(task), offset);
}
/*
 * Install a segment selector for the traced task (64-bit).
 * fs/gs interact with the TLS-selector bases (see the in-body comments);
 * ds/es are stored and, for current, loaded immediately; cs/ss cannot
 * actually be changed in 64-bit mode (only the IA32-emulation pt_regs
 * copy is updated).  Returns 0 or -EIO for a disallowed selector.
 */
static int set_segment_reg(struct task_struct *task,
			   unsigned long offset, u16 value)
{
	/*
	 * The value argument was already truncated to 16 bits.
	 */
	if (invalid_selector(value))
		return -EIO;

	switch (offset) {
	case offsetof(struct user_regs_struct,fs):
		/*
		 * If this is setting fs as for normal 64-bit use but
		 * setting fs_base has implicitly changed it, leave it.
		 */
		if ((value == FS_TLS_SEL && task->thread.fsindex == 0 &&
		     task->thread.fs != 0) ||
		    (value == 0 && task->thread.fsindex == FS_TLS_SEL &&
		     task->thread.fs == 0))
			break;
		task->thread.fsindex = value;
		if (task == current)
			loadsegment(fs, task->thread.fsindex);
		break;
	case offsetof(struct user_regs_struct,gs):
		/*
		 * If this is setting gs as for normal 64-bit use but
		 * setting gs_base has implicitly changed it, leave it.
		 */
		if ((value == GS_TLS_SEL && task->thread.gsindex == 0 &&
		     task->thread.gs != 0) ||
		    (value == 0 && task->thread.gsindex == GS_TLS_SEL &&
		     task->thread.gs == 0))
			break;
		task->thread.gsindex = value;
		if (task == current)
			load_gs_index(task->thread.gsindex);
		break;
	case offsetof(struct user_regs_struct,ds):
		task->thread.ds = value;
		if (task == current)
			loadsegment(ds, task->thread.ds);
		break;
	case offsetof(struct user_regs_struct,es):
		task->thread.es = value;
		if (task == current)
			loadsegment(es, task->thread.es);
		break;

		/*
		 * Can't actually change these in 64-bit mode.
		 */
	case offsetof(struct user_regs_struct,cs):
		if (unlikely(value == 0))
			return -EIO;
#ifdef CONFIG_IA32_EMULATION
		if (test_tsk_thread_flag(task, TIF_IA32))
			task_pt_regs(task)->cs = value;
#endif
		break;
	case offsetof(struct user_regs_struct,ss):
		if (unlikely(value == 0))
			return -EIO;
#ifdef CONFIG_IA32_EMULATION
		if (test_tsk_thread_flag(task, TIF_IA32))
			task_pt_regs(task)->ss = value;
#endif
		break;
	}

	return 0;
}

#endif	/* CONFIG_X86_32 */
  321. static unsigned long get_flags(struct task_struct *task)
  322. {
  323. unsigned long retval = task_pt_regs(task)->flags;
  324. /*
  325. * If the debugger set TF, hide it from the readout.
  326. */
  327. if (test_tsk_thread_flag(task, TIF_FORCED_TF))
  328. retval &= ~X86_EFLAGS_TF;
  329. return retval;
  330. }
  331. static int set_flags(struct task_struct *task, unsigned long value)
  332. {
  333. struct pt_regs *regs = task_pt_regs(task);
  334. /*
  335. * If the user value contains TF, mark that
  336. * it was not "us" (the debugger) that set it.
  337. * If not, make sure it stays set if we had.
  338. */
  339. if (value & X86_EFLAGS_TF)
  340. clear_tsk_thread_flag(task, TIF_FORCED_TF);
  341. else if (test_tsk_thread_flag(task, TIF_FORCED_TF))
  342. value |= X86_EFLAGS_TF;
  343. regs->flags = (regs->flags & ~FLAG_MASK) | (value & FLAG_MASK);
  344. return 0;
  345. }
/*
 * Write one user_regs_struct slot of the tracee.  Segment registers and
 * flags go through their checked setters; fs_base/gs_base (64-bit) go
 * through do_arch_prctl so the GDT/MSR state stays consistent; anything
 * else is stored directly into pt_regs.  Returns 0 or a -errno.
 */
static int putreg(struct task_struct *child,
		  unsigned long offset, unsigned long value)
{
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ds):
	case offsetof(struct user_regs_struct, es):
	case offsetof(struct user_regs_struct, fs):
	case offsetof(struct user_regs_struct, gs):
	case offsetof(struct user_regs_struct, ss):
		return set_segment_reg(child, offset, value);

	case offsetof(struct user_regs_struct, flags):
		return set_flags(child, value);

#ifdef CONFIG_X86_64
	case offsetof(struct user_regs_struct,fs_base):
		/* Reject bases outside the tracee's address space. */
		if (value >= TASK_SIZE_OF(child))
			return -EIO;
		/*
		 * When changing the segment base, use do_arch_prctl
		 * to set either thread.fs or thread.fsindex and the
		 * corresponding GDT slot.
		 */
		if (child->thread.fs != value)
			return do_arch_prctl(child, ARCH_SET_FS, value);
		return 0;
	case offsetof(struct user_regs_struct,gs_base):
		/*
		 * Exactly the same here as the %fs handling above.
		 */
		if (value >= TASK_SIZE_OF(child))
			return -EIO;
		if (child->thread.gs != value)
			return do_arch_prctl(child, ARCH_SET_GS, value);
		return 0;
#endif
	}

	*pt_regs_access(task_pt_regs(child), offset) = value;
	return 0;
}
/*
 * Read one user_regs_struct slot of the tracee: counterpart of putreg().
 * Segments and flags use their accessors; fs_base/gs_base (64-bit)
 * reconstruct the base from thread state or the TLS descriptor; anything
 * else comes straight from pt_regs.
 */
static unsigned long getreg(struct task_struct *task, unsigned long offset)
{
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ds):
	case offsetof(struct user_regs_struct, es):
	case offsetof(struct user_regs_struct, fs):
	case offsetof(struct user_regs_struct, gs):
	case offsetof(struct user_regs_struct, ss):
		return get_segment_reg(task, offset);

	case offsetof(struct user_regs_struct, flags):
		return get_flags(task);

#ifdef CONFIG_X86_64
	case offsetof(struct user_regs_struct, fs_base): {
		/*
		 * do_arch_prctl may have used a GDT slot instead of
		 * the MSR.  To userland, it appears the same either
		 * way, except the %fs segment selector might not be 0.
		 */
		unsigned int seg = task->thread.fsindex;
		if (task->thread.fs != 0)
			return task->thread.fs;
		if (task == current)
			asm("movl %%fs,%0" : "=r" (seg));
		if (seg != FS_TLS_SEL)
			return 0;
		return get_desc_base(&task->thread.tls_array[FS_TLS]);
	}
	case offsetof(struct user_regs_struct, gs_base): {
		/*
		 * Exactly the same here as the %fs handling above.
		 */
		unsigned int seg = task->thread.gsindex;
		if (task->thread.gs != 0)
			return task->thread.gs;
		if (task == current)
			asm("movl %%gs,%0" : "=r" (seg));
		if (seg != GS_TLS_SEL)
			return 0;
		return get_desc_base(&task->thread.tls_array[GS_TLS]);
	}
#endif
	}

	return *pt_regs_access(task_pt_regs(task), offset);
}
  430. static int genregs_get(struct task_struct *target,
  431. const struct user_regset *regset,
  432. unsigned int pos, unsigned int count,
  433. void *kbuf, void __user *ubuf)
  434. {
  435. if (kbuf) {
  436. unsigned long *k = kbuf;
  437. while (count >= sizeof(*k)) {
  438. *k++ = getreg(target, pos);
  439. count -= sizeof(*k);
  440. pos += sizeof(*k);
  441. }
  442. } else {
  443. unsigned long __user *u = ubuf;
  444. while (count >= sizeof(*u)) {
  445. if (__put_user(getreg(target, pos), u++))
  446. return -EFAULT;
  447. count -= sizeof(*u);
  448. pos += sizeof(*u);
  449. }
  450. }
  451. return 0;
  452. }
  453. static int genregs_set(struct task_struct *target,
  454. const struct user_regset *regset,
  455. unsigned int pos, unsigned int count,
  456. const void *kbuf, const void __user *ubuf)
  457. {
  458. int ret = 0;
  459. if (kbuf) {
  460. const unsigned long *k = kbuf;
  461. while (count >= sizeof(*k) && !ret) {
  462. ret = putreg(target, pos, *k++);
  463. count -= sizeof(*k);
  464. pos += sizeof(*k);
  465. }
  466. } else {
  467. const unsigned long __user *u = ubuf;
  468. while (count >= sizeof(*u) && !ret) {
  469. unsigned long word;
  470. ret = __get_user(word, u++);
  471. if (ret)
  472. break;
  473. ret = putreg(target, pos, word);
  474. count -= sizeof(*u);
  475. pos += sizeof(*u);
  476. }
  477. }
  478. return ret;
  479. }
  480. static void ptrace_triggered(struct perf_event *bp,
  481. struct perf_sample_data *data,
  482. struct pt_regs *regs)
  483. {
  484. int i;
  485. struct thread_struct *thread = &(current->thread);
  486. /*
  487. * Store in the virtual DR6 register the fact that the breakpoint
  488. * was hit so the thread's debugger will see it.
  489. */
  490. for (i = 0; i < HBP_NUM; i++) {
  491. if (thread->ptrace_bps[i] == bp)
  492. break;
  493. }
  494. thread->debugreg6 |= (DR_TRAP0 << i);
  495. }
  496. /*
  497. * Walk through every ptrace breakpoints for this thread and
  498. * build the dr7 value on top of their attributes.
  499. *
  500. */
  501. static unsigned long ptrace_get_dr7(struct perf_event *bp[])
  502. {
  503. int i;
  504. int dr7 = 0;
  505. struct arch_hw_breakpoint *info;
  506. for (i = 0; i < HBP_NUM; i++) {
  507. if (bp[i] && !bp[i]->attr.disabled) {
  508. info = counter_arch_bp(bp[i]);
  509. dr7 |= encode_dr7(i, info->len, info->type);
  510. }
  511. }
  512. return dr7;
  513. }
/*
 * Reconfigure an existing ptrace breakpoint with a new dr7-style
 * length/type and enabled/disabled state.  Returns 0 or a -errno.
 * NOTE(review): @tsk is unused in this body — verify callers before
 * removing it.
 */
static int
ptrace_modify_breakpoint(struct perf_event *bp, int len, int type,
			 struct task_struct *tsk, int disabled)
{
	int err;
	int gen_len, gen_type;
	struct perf_event_attr attr;

	/*
	 * We should have at least an inactive breakpoint at this
	 * slot.  It means the user is writing dr7 without having
	 * written the address register first.
	 */
	if (!bp)
		return -EINVAL;

	/* Translate the x86 dr7 encoding into generic perf attributes. */
	err = arch_bp_generic_fields(len, type, &gen_len, &gen_type);
	if (err)
		return err;

	attr = bp->attr;
	attr.bp_len = gen_len;
	attr.bp_type = gen_type;
	attr.disabled = disabled;

	return modify_user_hw_breakpoint(bp, &attr);
}
/*
 * Handle ptrace writes to debug register 7.
 *
 * Two-pass protocol: the first pass enables/updates breakpoints but
 * deliberately skips disabling any, so a failure part-way cannot leave a
 * window where another user grabs a freed debug register.  The second
 * pass then performs the deferred disables — or, if the first pass
 * failed, replays the original dr7 to roll everything back.
 */
static int ptrace_write_dr7(struct task_struct *tsk, unsigned long data)
{
	struct thread_struct *thread = &(tsk->thread);
	unsigned long old_dr7;
	int i, orig_ret = 0, rc = 0;
	int enabled, second_pass = 0;
	unsigned len, type;
	struct perf_event *bp;

	if (ptrace_get_breakpoints(tsk) < 0)
		return -ESRCH;

	data &= ~DR_CONTROL_RESERVED;
	old_dr7 = ptrace_get_dr7(thread->ptrace_bps);
restore:
	/*
	 * Loop through all the hardware breakpoints, making the
	 * appropriate changes to each.
	 */
	for (i = 0; i < HBP_NUM; i++) {
		enabled = decode_dr7(data, i, &len, &type);
		bp = thread->ptrace_bps[i];

		if (!enabled) {
			if (bp) {
				/*
				 * Don't unregister the breakpoints right-away,
				 * unless all register_user_hw_breakpoint()
				 * requests have succeeded.  This prevents
				 * any window of opportunity for debug
				 * register grabbing by other users.
				 */
				if (!second_pass)
					continue;

				rc = ptrace_modify_breakpoint(bp, len, type,
							      tsk, 1);
				if (rc)
					break;
			}
			continue;
		}

		rc = ptrace_modify_breakpoint(bp, len, type, tsk, 0);
		if (rc)
			break;
	}
	/*
	 * Make a second pass to free the remaining unused breakpoints
	 * or to restore the original breakpoints if an error occurred.
	 */
	if (!second_pass) {
		second_pass = 1;
		if (rc < 0) {
			/* Remember the first-pass error; replay old state. */
			orig_ret = rc;
			data = old_dr7;
		}
		goto restore;
	}

	ptrace_put_breakpoints(tsk);

	/* A first-pass error takes precedence over a rollback error. */
	return ((orig_ret < 0) ? orig_ret : rc);
}
/*
 * Handle PTRACE_PEEKUSR calls for the debug register area.
 * DR0-DR3 return the breakpoint address (0 when the slot is empty),
 * DR6 the virtual status register, DR7 the last value successfully
 * written via ptrace; DR4/DR5 read as 0.
 * NOTE(review): -ESRCH is returned through an unsigned long, so callers
 * cannot distinguish it from a breakpoint at that address — verify this
 * is acceptable at the call sites.
 */
static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n)
{
	struct thread_struct *thread = &(tsk->thread);
	unsigned long val = 0;

	if (n < HBP_NUM) {
		struct perf_event *bp;

		if (ptrace_get_breakpoints(tsk) < 0)
			return -ESRCH;

		bp = thread->ptrace_bps[n];
		if (!bp)
			val = 0;
		else
			val = bp->hw.info.address;

		ptrace_put_breakpoints(tsk);
	} else if (n == 6) {
		val = thread->debugreg6;
	} else if (n == 7) {
		val = thread->ptrace_dr7;
	}
	return val;
}
/*
 * Set the address of ptrace breakpoint slot @nr.  An empty slot gets a
 * freshly registered — but disabled — breakpoint with stub length/type;
 * an occupied slot just has its address modified.  Returns 0 or -errno.
 */
static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr,
				      unsigned long addr)
{
	struct perf_event *bp;
	struct thread_struct *t = &tsk->thread;
	struct perf_event_attr attr;
	int err = 0;

	if (ptrace_get_breakpoints(tsk) < 0)
		return -ESRCH;

	if (!t->ptrace_bps[nr]) {
		ptrace_breakpoint_init(&attr);
		/*
		 * Put stub len and type to register (reserve) an inactive but
		 * correct bp.
		 */
		attr.bp_addr = addr;
		attr.bp_len = HW_BREAKPOINT_LEN_1;
		attr.bp_type = HW_BREAKPOINT_W;
		attr.disabled = 1;

		bp = register_user_hw_breakpoint(&attr, ptrace_triggered,
						 NULL, tsk);

		/*
		 * CHECKME: the previous code returned -EIO if the addr wasn't
		 * a valid task virtual addr. The new one will return -EINVAL in
		 * this case.
		 * -EINVAL may be what we want for in-kernel breakpoints users,
		 * but -EIO looks better for ptrace, since we refuse a register
		 * writing for the user. And anyway this is the previous
		 * behaviour.
		 */
		if (IS_ERR(bp)) {
			err = PTR_ERR(bp);
			goto put;
		}

		t->ptrace_bps[nr] = bp;
	} else {
		bp = t->ptrace_bps[nr];

		attr = bp->attr;
		attr.bp_addr = addr;
		err = modify_user_hw_breakpoint(bp, &attr);
	}

put:
	ptrace_put_breakpoints(tsk);
	return err;
}
/*
 * Handle PTRACE_POKEUSR calls for the debug register area.
 * DR0-DR3 set breakpoint addresses, DR6 is stored verbatim, DR7 goes
 * through the full validation in ptrace_write_dr7() (and the shadow
 * ptrace_dr7 is only updated on success).  DR4/DR5 do not exist.
 */
static int ptrace_set_debugreg(struct task_struct *tsk, int n,
			       unsigned long val)
{
	struct thread_struct *thread = &(tsk->thread);
	int rc = 0;

	/* There are no DR4 or DR5 registers */
	if (n == 4 || n == 5)
		return -EIO;

	if (n == 6) {
		thread->debugreg6 = val;
		goto ret_path;
	}
	if (n < HBP_NUM) {
		rc = ptrace_set_breakpoint_addr(tsk, n, val);
		if (rc)
			return rc;
	}
	/* All that's left is DR7 */
	if (n == 7) {
		rc = ptrace_write_dr7(tsk, val);
		if (!rc)
			thread->ptrace_dr7 = val;
	}

ret_path:
	return rc;
}
/*
 * These access the current or another (stopped) task's io permission
 * bitmap for debugging or core dump.
 */
static int ioperm_active(struct task_struct *target,
			 const struct user_regset *regset)
{
	/* Number of regset-sized chunks covering the bitmap in use. */
	return target->thread.io_bitmap_max / regset->size;
}

static int ioperm_get(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      void *kbuf, void __user *ubuf)
{
	/* No bitmap allocated means the task never used ioperm(). */
	if (!target->thread.io_bitmap_ptr)
		return -ENXIO;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   target->thread.io_bitmap_ptr,
				   0, IO_BITMAP_BYTES);
}
/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure the single step bit is not set, and drop any pending
 * syscall-emulation request where the arch supports it.
 */
void ptrace_disable(struct task_struct *child)
{
	user_disable_single_step(child);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif
}
  727. #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
  728. static const struct user_regset_view user_x86_32_view; /* Initialized below. */
  729. #endif
/*
 * Arch-specific ptrace dispatcher.  Handles the x86-specific requests
 * (USER-area peek/poke, GP/FP regset transfers, TLS thread areas,
 * PTRACE_ARCH_PRCTL) and forwards everything else to the generic
 * ptrace_request().  @addr/@data meaning depends on @request, per
 * ptrace(2).  Returns 0 or a -errno.
 */
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret;
	unsigned long __user *datap = (unsigned long __user *)data;

	switch (request) {
	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long tmp;

		ret = -EIO;
		/* Require word alignment and an offset inside struct user. */
		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user))
			break;

		tmp = 0;  /* Default return condition */
		if (addr < sizeof(struct user_regs_struct))
			tmp = getreg(child, addr);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			tmp = ptrace_get_debugreg(child, addr / sizeof(data));
		}
		ret = put_user(tmp, datap);
		break;
	}

	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user))
			break;

		if (addr < sizeof(struct user_regs_struct))
			ret = putreg(child, addr, data);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			ret = ptrace_set_debugreg(child,
						  addr / sizeof(data), data);
		}
		break;

	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_GENERAL,
					   0, sizeof(struct user_regs_struct),
					   datap);

	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_GENERAL,
					     0, sizeof(struct user_regs_struct),
					     datap);

	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_FP,
					   0, sizeof(struct user_i387_struct),
					   datap);

	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_FP,
					     0, sizeof(struct user_i387_struct),
					     datap);

#ifdef CONFIG_X86_32
	case PTRACE_GETFPXREGS:	/* Get the child extended FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_XFP,
					   0, sizeof(struct user_fxsr_struct),
					   datap) ? -EIO : 0;

	case PTRACE_SETFPXREGS:	/* Set the child extended FPU state. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_XFP,
					     0, sizeof(struct user_fxsr_struct),
					     datap) ? -EIO : 0;
#endif

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	case PTRACE_GET_THREAD_AREA:
		if ((int) addr < 0)
			return -EIO;
		ret = do_get_thread_area(child, addr,
					 (struct user_desc __user *)data);
		break;

	case PTRACE_SET_THREAD_AREA:
		if ((int) addr < 0)
			return -EIO;
		ret = do_set_thread_area(child, addr,
					 (struct user_desc __user *)data, 0);
		break;
#endif

#ifdef CONFIG_X86_64
		/* normal 64bit interface to access TLS data.
		   Works just like arch_prctl, except that the arguments
		   are reversed. */
	case PTRACE_ARCH_PRCTL:
		ret = do_arch_prctl(child, data, addr);
		break;
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
  830. #ifdef CONFIG_IA32_EMULATION
  831. #include <linux/compat.h>
  832. #include <linux/syscalls.h>
  833. #include <asm/ia32.h>
  834. #include <asm/user32.h>
/*
 * Map a 32-bit user32 GP-register offset onto the corresponding native
 * pt_regs slot and store @value there.
 */
#define R32(l,q) \
	case offsetof(struct user32, regs.l): \
		regs->q = value; break

/* Segment registers go through set_segment_reg() instead of a raw store. */
#define SEG32(rs) \
	case offsetof(struct user32, regs.rs): \
		return set_segment_reg(child, \
				       offsetof(struct user_regs_struct, rs), \
				       value); \
		break

/*
 * Write one 32-bit register of a compat (ia32) tracee.
 *
 * @child: the traced task
 * @regno: byte offset into struct user32
 * @value: new register value
 *
 * Returns 0 on success or -EIO for an out-of-range/unaligned offset.
 * Writes to dummy fields of the virtual user structure are ignored.
 */
static int putreg32(struct task_struct *child, unsigned regno, u32 value)
{
	struct pt_regs *regs = task_pt_regs(child);

	switch (regno) {

	SEG32(cs);
	SEG32(ds);
	SEG32(es);
	SEG32(fs);
	SEG32(gs);
	SEG32(ss);
	R32(ebx, bx);
	R32(ecx, cx);
	R32(edx, dx);
	R32(edi, di);
	R32(esi, si);
	R32(ebp, bp);
	R32(eax, ax);
	R32(eip, ip);
	R32(esp, sp);

	case offsetof(struct user32, regs.orig_eax):
		/*
		 * A 32-bit debugger setting orig_eax means to restore
		 * the state of the task restarting a 32-bit syscall.
		 * Make sure we interpret the -ERESTART* codes correctly
		 * in case the task is not actually still sitting at the
		 * exit from a 32-bit syscall with TS_COMPAT still set.
		 */
		regs->orig_ax = value;
		if (syscall_get_nr(child, regs) >= 0)
			task_thread_info(child)->status |= TS_COMPAT;
		break;

	case offsetof(struct user32, regs.eflags):
		/* eflags goes through set_flags() rather than a raw store. */
		return set_flags(child, value);

	case offsetof(struct user32, u_debugreg[0]) ...
		offsetof(struct user32, u_debugreg[7]):
		/* Debug registers: convert the byte offset to a register index. */
		regno -= offsetof(struct user32, u_debugreg[0]);
		return ptrace_set_debugreg(child, regno / 4, value);

	default:
		if (regno > sizeof(struct user32) || (regno & 3))
			return -EIO;

		/*
		 * Other dummy fields in the virtual user structure
		 * are ignored
		 */
		break;
	}
	return 0;
}

#undef R32
#undef SEG32
/*
 * Map a 32-bit user32 GP-register offset onto the corresponding native
 * pt_regs slot and read it into *val.
 */
#define R32(l,q) \
	case offsetof(struct user32, regs.l): \
		*val = regs->q; break

/* Segment registers are fetched via get_segment_reg(), not a raw load. */
#define SEG32(rs) \
	case offsetof(struct user32, regs.rs): \
		*val = get_segment_reg(child, \
				       offsetof(struct user_regs_struct, rs)); \
		break

/*
 * Read one 32-bit register of a compat (ia32) tracee.
 *
 * @child: the traced task
 * @regno: byte offset into struct user32
 * @val:   where the register value is stored
 *
 * Returns 0 on success or -EIO for an out-of-range/unaligned offset.
 * Dummy fields of the virtual user structure read back as 0.
 */
static int getreg32(struct task_struct *child, unsigned regno, u32 *val)
{
	struct pt_regs *regs = task_pt_regs(child);

	switch (regno) {

	SEG32(ds);
	SEG32(es);
	SEG32(fs);
	SEG32(gs);
	R32(cs, cs);
	R32(ss, ss);
	R32(ebx, bx);
	R32(ecx, cx);
	R32(edx, dx);
	R32(edi, di);
	R32(esi, si);
	R32(ebp, bp);
	R32(eax, ax);
	R32(orig_eax, orig_ax);
	R32(eip, ip);
	R32(esp, sp);

	case offsetof(struct user32, regs.eflags):
		/* eflags goes through get_flags() rather than a raw load. */
		*val = get_flags(child);
		break;

	case offsetof(struct user32, u_debugreg[0]) ...
		offsetof(struct user32, u_debugreg[7]):
		/* Debug registers: convert the byte offset to a register index. */
		regno -= offsetof(struct user32, u_debugreg[0]);
		*val = ptrace_get_debugreg(child, regno / 4);
		break;

	default:
		if (regno > sizeof(struct user32) || (regno & 3))
			return -EIO;

		/*
		 * Other dummy fields in the virtual user structure
		 * are ignored
		 */
		*val = 0;
		break;
	}
	return 0;
}

#undef R32
#undef SEG32
  944. static int genregs32_get(struct task_struct *target,
  945. const struct user_regset *regset,
  946. unsigned int pos, unsigned int count,
  947. void *kbuf, void __user *ubuf)
  948. {
  949. if (kbuf) {
  950. compat_ulong_t *k = kbuf;
  951. while (count >= sizeof(*k)) {
  952. getreg32(target, pos, k++);
  953. count -= sizeof(*k);
  954. pos += sizeof(*k);
  955. }
  956. } else {
  957. compat_ulong_t __user *u = ubuf;
  958. while (count >= sizeof(*u)) {
  959. compat_ulong_t word;
  960. getreg32(target, pos, &word);
  961. if (__put_user(word, u++))
  962. return -EFAULT;
  963. count -= sizeof(*u);
  964. pos += sizeof(*u);
  965. }
  966. }
  967. return 0;
  968. }
  969. static int genregs32_set(struct task_struct *target,
  970. const struct user_regset *regset,
  971. unsigned int pos, unsigned int count,
  972. const void *kbuf, const void __user *ubuf)
  973. {
  974. int ret = 0;
  975. if (kbuf) {
  976. const compat_ulong_t *k = kbuf;
  977. while (count >= sizeof(*k) && !ret) {
  978. ret = putreg32(target, pos, *k++);
  979. count -= sizeof(*k);
  980. pos += sizeof(*k);
  981. }
  982. } else {
  983. const compat_ulong_t __user *u = ubuf;
  984. while (count >= sizeof(*u) && !ret) {
  985. compat_ulong_t word;
  986. ret = __get_user(word, u++);
  987. if (ret)
  988. break;
  989. ret = putreg32(target, pos, word);
  990. count -= sizeof(*u);
  991. pos += sizeof(*u);
  992. }
  993. }
  994. return ret;
  995. }
#ifdef CONFIG_X86_X32_ABI
/*
 * ptrace backend for x32 tracers: native 64-bit register layout, but
 * 32-bit (compat) address and data arguments from the caller.
 */
static long x32_arch_ptrace(struct task_struct *child,
			    compat_long_t request, compat_ulong_t caddr,
			    compat_ulong_t cdata)
{
	/* Zero-extend the compat arguments to native width. */
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;

	switch (request) {
	/* Read 32bits at location addr in the USER area. Only allow
	   to return the lower 32bits of segment and debug registers. */
	case PTRACE_PEEKUSR: {
		u32 tmp;

		ret = -EIO;
		/* Reject unaligned offsets, offsets past struct user, and
		   anything below the segment-register area. */
		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user) ||
		    addr < offsetof(struct user_regs_struct, cs))
			break;

		tmp = 0;  /* Default return condition */
		if (addr < sizeof(struct user_regs_struct))
			tmp = getreg(child, addr);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			tmp = ptrace_get_debugreg(child, addr / sizeof(data));
		}
		ret = put_user(tmp, (__u32 __user *)datap);
		break;
	}

	/* Write the word at location addr in the USER area. Only allow
	   to update segment and debug registers with the upper 32bits
	   zero-extended. */
	case PTRACE_POKEUSR:
		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user) ||
		    addr < offsetof(struct user_regs_struct, cs))
			break;

		if (addr < sizeof(struct user_regs_struct))
			ret = putreg(child, addr, data);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			ret = ptrace_set_debugreg(child,
						  addr / sizeof(data), data);
		}
		break;

	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_GENERAL,
					   0, sizeof(struct user_regs_struct),
					   datap);

	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_GENERAL,
					     0, sizeof(struct user_regs_struct),
					     datap);

	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_FP,
					   0, sizeof(struct user_i387_struct),
					   datap);

	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_FP,
					     0, sizeof(struct user_i387_struct),
					     datap);

	default:
		/* Everything else is handled generically. */
		return compat_ptrace_request(child, request, addr, data);
	}

	return ret;
}
#endif
/*
 * ptrace entry point for 32-bit (compat) tracers.
 *
 * An x32 tracer is diverted to x32_arch_ptrace() when CONFIG_X86_X32_ABI
 * is enabled; genuine ia32 tracers are handled here with the 32-bit
 * register and FP layouts.
 */
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	/* Zero-extend the compat arguments to native width. */
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;
	__u32 val;

#ifdef CONFIG_X86_X32_ABI
	if (!is_ia32_task())
		return x32_arch_ptrace(child, request, caddr, cdata);
#endif

	switch (request) {
	case PTRACE_PEEKUSR:
		/* Read one 32-bit word of the virtual USER area. */
		ret = getreg32(child, addr, &val);
		if (ret == 0)
			ret = put_user(val, (__u32 __user *)datap);
		break;

	case PTRACE_POKEUSR:
		/* Write one 32-bit word of the virtual USER area. */
		ret = putreg32(child, addr, data);
		break;

	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_GENERAL,
					   0, sizeof(struct user_regs_struct32),
					   datap);

	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_GENERAL, 0,
					     sizeof(struct user_regs_struct32),
					     datap);

	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_FP, 0,
					   sizeof(struct user_i387_ia32_struct),
					   datap);

	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
		return copy_regset_from_user(
			child, &user_x86_32_view, REGSET_FP,
			0, sizeof(struct user_i387_ia32_struct), datap);

	case PTRACE_GETFPXREGS:	/* Get the child extended FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_XFP, 0,
					   sizeof(struct user32_fxsr_struct),
					   datap);

	case PTRACE_SETFPXREGS:	/* Set the child extended FPU state. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_XFP, 0,
					     sizeof(struct user32_fxsr_struct),
					     datap);

	case PTRACE_GET_THREAD_AREA:
	case PTRACE_SET_THREAD_AREA:
		/* TLS requests share the native arch_ptrace() implementation. */
		return arch_ptrace(child, request, addr, data);

	default:
		return compat_ptrace_request(child, request, addr, data);
	}

	return ret;
}
  1130. #endif /* CONFIG_IA32_EMULATION */
  1131. #ifdef CONFIG_X86_64
/*
 * Regsets exported for native 64-bit tasks, handed out through
 * user_x86_64_view below.
 */
static struct user_regset x86_64_regsets[] __read_mostly = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_regs_struct) / sizeof(long),
		.size = sizeof(long), .align = sizeof(long),
		.get = genregs_get, .set = genregs_set
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_i387_struct) / sizeof(long),
		.size = sizeof(long), .align = sizeof(long),
		.active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set
	},
	[REGSET_XSTATE] = {
		.core_note_type = NT_X86_XSTATE,
		.size = sizeof(u64), .align = sizeof(u64),
		/* .n is filled in later by update_regset_xstate_info() */
		.active = xstateregs_active, .get = xstateregs_get,
		.set = xstateregs_set
	},
	[REGSET_IOPERM64] = {
		.core_note_type = NT_386_IOPERM,
		.n = IO_BITMAP_LONGS,
		.size = sizeof(long), .align = sizeof(long),
		/* I/O bitmap is read-only through this interface: no .set */
		.active = ioperm_active, .get = ioperm_get
	},
};

/* The regset view describing native 64-bit tasks. */
static const struct user_regset_view user_x86_64_view = {
	.name = "x86_64", .e_machine = EM_X86_64,
	.regsets = x86_64_regsets, .n = ARRAY_SIZE(x86_64_regsets)
};
  1162. #else /* CONFIG_X86_32 */
  1163. #define user_regs_struct32 user_regs_struct
  1164. #define genregs32_get genregs_get
  1165. #define genregs32_set genregs_set
  1166. #endif /* CONFIG_X86_64 */
  1167. #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
/*
 * Regsets exported for 32-bit tasks (native i386 or ia32 emulation),
 * handed out through user_x86_32_view below.
 */
static struct user_regset x86_32_regsets[] __read_mostly = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_regs_struct32) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.get = genregs32_get, .set = genregs32_set
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_i387_ia32_struct) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = fpregs_active, .get = fpregs_get, .set = fpregs_set
	},
	[REGSET_XFP] = {
		.core_note_type = NT_PRXFPREG,
		.n = sizeof(struct user32_fxsr_struct) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set
	},
	[REGSET_XSTATE] = {
		.core_note_type = NT_X86_XSTATE,
		.size = sizeof(u64), .align = sizeof(u64),
		/* .n is filled in later by update_regset_xstate_info() */
		.active = xstateregs_active, .get = xstateregs_get,
		.set = xstateregs_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_386_TLS,
		.n = GDT_ENTRY_TLS_ENTRIES, .bias = GDT_ENTRY_TLS_MIN,
		.size = sizeof(struct user_desc),
		.align = sizeof(struct user_desc),
		.active = regset_tls_active,
		.get = regset_tls_get, .set = regset_tls_set
	},
	[REGSET_IOPERM32] = {
		.core_note_type = NT_386_IOPERM,
		.n = IO_BITMAP_BYTES / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		/* I/O bitmap is read-only through this interface: no .set */
		.active = ioperm_active, .get = ioperm_get
	},
};

/* The regset view describing 32-bit tasks. */
static const struct user_regset_view user_x86_32_view = {
	.name = "i386", .e_machine = EM_386,
	.regsets = x86_32_regsets, .n = ARRAY_SIZE(x86_32_regsets)
};
  1212. #endif
/*
 * This represents bytes 464..511 in the memory layout exported through
 * the REGSET_XSTATE interface.
 */
u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];

/*
 * Record the xstate buffer size and feature mask: patch the variable
 * .n member of the REGSET_XSTATE regsets (declared with .n unset above)
 * and stash the mask in the software-reserved words exported to
 * userspace.
 */
void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
{
#ifdef CONFIG_X86_64
	x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
#endif
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	x86_32_regsets[REGSET_XSTATE].n = size / sizeof(u64);
#endif
	xstate_fx_sw_bytes[USER_XSTATE_XCR0_WORD] = xstate_mask;
}
/*
 * Return the regset view matching @task's ABI: the i386 view for 32-bit
 * tasks (native i386, or TIF_IA32 under ia32 emulation), the x86_64
 * view otherwise.  Note the ifdef-interleaved control flow: on an
 * IA32_EMULATION kernel the first return is conditional, on native
 * 32-bit it is unconditional.
 */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(task, TIF_IA32))
#endif
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
		return &user_x86_32_view;
#endif
#ifdef CONFIG_X86_64
	return &user_x86_64_view;
#endif
}
  1240. static void fill_sigtrap_info(struct task_struct *tsk,
  1241. struct pt_regs *regs,
  1242. int error_code, int si_code,
  1243. struct siginfo *info)
  1244. {
  1245. tsk->thread.trap_nr = X86_TRAP_DB;
  1246. tsk->thread.error_code = error_code;
  1247. memset(info, 0, sizeof(*info));
  1248. info->si_signo = SIGTRAP;
  1249. info->si_code = si_code;
  1250. info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
  1251. }
/*
 * Fill in @info for the SIGTRAP reported on a user single-step trap:
 * TRAP_BRKPT, no error code.
 */
void user_single_step_siginfo(struct task_struct *tsk,
				struct pt_regs *regs,
				struct siginfo *info)
{
	fill_sigtrap_info(tsk, regs, 0, TRAP_BRKPT, info);
}
/*
 * Build a SIGTRAP siginfo for @tsk from @regs/@error_code/@si_code and
 * force-deliver the signal to the task.
 */
void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
		  int error_code, int si_code)
{
	struct siginfo info;

	fill_sigtrap_info(tsk, regs, error_code, si_code, &info);
	/* Send us the fake SIGTRAP */
	force_sig_info(SIGTRAP, &info, tsk);
}
/* Is the current syscall entry using the 32-bit ABI? */
#ifdef CONFIG_X86_32
# define IS_IA32	1
#elif defined CONFIG_IA32_EMULATION
# define IS_IA32	is_compat_task()
#else
# define IS_IA32	0
#endif

/*
 * We must return the syscall number to actually look up in the table.
 * This can be -1L to skip running any syscall at all.
 */
long syscall_trace_enter(struct pt_regs *regs)
{
	long ret = 0;

	rcu_user_exit();

	/*
	 * If we stepped into a sysenter/syscall insn, it trapped in
	 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
	 * If user-mode had set TF itself, then it's still clear from
	 * do_debug() and we need to set it again to restore the user
	 * state. If we entered on the slow path, TF was already set.
	 */
	if (test_thread_flag(TIF_SINGLESTEP))
		regs->flags |= X86_EFLAGS_TF;

	/* do the secure computing check first */
	if (secure_computing(regs->orig_ax)) {
		/* seccomp failures shouldn't expose any additional code. */
		ret = -1L;
		goto out;
	}

	/* TIF_SYSCALL_EMU: skip the syscall itself (PTRACE_SYSEMU). */
	if (unlikely(test_thread_flag(TIF_SYSCALL_EMU)))
		ret = -1L;

	/* The tracer may also veto the syscall from its entry stop. */
	if ((ret || test_thread_flag(TIF_SYSCALL_TRACE)) &&
	    tracehook_report_syscall_entry(regs))
		ret = -1L;

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->orig_ax);

	/* Audit with the argument registers of the calling ABI. */
	if (IS_IA32)
		audit_syscall_entry(AUDIT_ARCH_I386,
				    regs->orig_ax,
				    regs->bx, regs->cx,
				    regs->dx, regs->si);
#ifdef CONFIG_X86_64
	else
		audit_syscall_entry(AUDIT_ARCH_X86_64,
				    regs->orig_ax,
				    regs->di, regs->si,
				    regs->dx, regs->r10);
#endif

out:
	return ret ?: regs->orig_ax;
}
/*
 * Slow path on syscall exit: audit, the exit tracepoint, and the
 * ptrace syscall-exit / single-step report.
 */
void syscall_trace_leave(struct pt_regs *regs)
{
	bool step;

	audit_syscall_exit(regs);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->ax);

	/*
	 * If TIF_SYSCALL_EMU is set, we only get here because of
	 * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
	 * We already reported this syscall instruction in
	 * syscall_trace_enter().
	 */
	step = unlikely(test_thread_flag(TIF_SINGLESTEP)) &&
	       !test_thread_flag(TIF_SYSCALL_EMU);
	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, step);

	rcu_user_enter();
}