/* By Ross Biro 1/23/92 */
/*
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * BTS tracing
 *	Markus Metzger <markus.t.metzger@intel.com>, Dec 2007
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>
#include <linux/workqueue.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/debugreg.h>
#include <asm/ldt.h>
#include <asm/desc.h>
#include <asm/prctl.h>
#include <asm/proto.h>
#include <asm/ds.h>
#include <asm/hw_breakpoint.h>

#include <trace/syscall.h>

#include "tls.h"

enum x86_regset {
	REGSET_GENERAL,
	REGSET_FP,
	REGSET_XFP,
	REGSET_IOPERM64 = REGSET_XFP,
	REGSET_TLS,
	REGSET_IOPERM32,
};

/*
 * does not yet catch signals sent when the child dies.
 * in exit.c or in signal.c.
 */

/*
 * Determines which flags the user has access to [1 = access, 0 = no access].
 */
#define FLAG_MASK_32		((unsigned long)			\
				 (X86_EFLAGS_CF | X86_EFLAGS_PF |	\
				  X86_EFLAGS_AF | X86_EFLAGS_ZF |	\
				  X86_EFLAGS_SF | X86_EFLAGS_TF |	\
				  X86_EFLAGS_DF | X86_EFLAGS_OF |	\
				  X86_EFLAGS_RF | X86_EFLAGS_AC))

/*
 * Determines whether a value may be installed in a segment register.
 */
static inline bool invalid_selector(u16 value)
{
	return unlikely(value != 0 && (value & SEGMENT_RPL_MASK) != USER_RPL);
}
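
/*
 * Illustrative examples (not part of the original source), assuming the
 * usual x86 definitions SEGMENT_RPL_MASK == 0x3 and USER_RPL == 3 from
 * <asm/segment.h>:
 *
 *	invalid_selector(0x00);	// false - null selectors are allowed
 *	invalid_selector(0x2b);	// false - 0x2b & 3 == 3 == USER_RPL
 *	invalid_selector(0x10);	// true  - 0x10 & 3 == 0, a kernel RPL
 *
 * So any non-null selector a debugger installs must carry user RPL.
 */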
#ifdef CONFIG_X86_32

#define FLAG_MASK		FLAG_MASK_32

static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
{
	BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
	return &regs->bx + (regno >> 2);
}

static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
	/*
	 * Returning the value truncates it to 16 bits.
	 */
	unsigned int retval;
	if (offset != offsetof(struct user_regs_struct, gs))
		retval = *pt_regs_access(task_pt_regs(task), offset);
	else {
		if (task == current)
			retval = get_user_gs(task_pt_regs(task));
		else
			retval = task_user_gs(task);
	}
	return retval;
}

static int set_segment_reg(struct task_struct *task,
			   unsigned long offset, u16 value)
{
	/*
	 * The value argument was already truncated to 16 bits.
	 */
	if (invalid_selector(value))
		return -EIO;

	/*
	 * For %cs and %ss we cannot permit a null selector.
	 * We can permit a bogus selector as long as it has USER_RPL.
	 * Null selectors are fine for other segment registers, but
	 * we will never get back to user mode with invalid %cs or %ss
	 * and will take the trap in iret instead. Much code relies
	 * on user_mode() to distinguish a user trap frame (which can
	 * safely use invalid selectors) from a kernel trap frame.
	 */
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ss):
		if (unlikely(value == 0))
			return -EIO;

	default:
		*pt_regs_access(task_pt_regs(task), offset) = value;
		break;

	case offsetof(struct user_regs_struct, gs):
		if (task == current)
			set_user_gs(task_pt_regs(task), value);
		else
			task_user_gs(task) = value;
	}

	return 0;
}

#else  /* CONFIG_X86_64 */

#define FLAG_MASK		(FLAG_MASK_32 | X86_EFLAGS_NT)

static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long offset)
{
	BUILD_BUG_ON(offsetof(struct pt_regs, r15) != 0);
	return &regs->r15 + (offset / sizeof(regs->r15));
}

static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
	/*
	 * Returning the value truncates it to 16 bits.
	 */
	unsigned int seg;

	switch (offset) {
	case offsetof(struct user_regs_struct, fs):
		if (task == current) {
			/* Older gas can't assemble movq %?s,%r?? */
			asm("movl %%fs,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.fsindex;
	case offsetof(struct user_regs_struct, gs):
		if (task == current) {
			asm("movl %%gs,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.gsindex;
	case offsetof(struct user_regs_struct, ds):
		if (task == current) {
			asm("movl %%ds,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.ds;
	case offsetof(struct user_regs_struct, es):
		if (task == current) {
			asm("movl %%es,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.es;

	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ss):
		break;
	}
	return *pt_regs_access(task_pt_regs(task), offset);
}

static int set_segment_reg(struct task_struct *task,
			   unsigned long offset, u16 value)
{
	/*
	 * The value argument was already truncated to 16 bits.
	 */
	if (invalid_selector(value))
		return -EIO;

	switch (offset) {
	case offsetof(struct user_regs_struct,fs):
		/*
		 * If this is setting fs as for normal 64-bit use but
		 * setting fs_base has implicitly changed it, leave it.
		 */
		if ((value == FS_TLS_SEL && task->thread.fsindex == 0 &&
		     task->thread.fs != 0) ||
		    (value == 0 && task->thread.fsindex == FS_TLS_SEL &&
		     task->thread.fs == 0))
			break;
		task->thread.fsindex = value;
		if (task == current)
			loadsegment(fs, task->thread.fsindex);
		break;
	case offsetof(struct user_regs_struct,gs):
		/*
		 * If this is setting gs as for normal 64-bit use but
		 * setting gs_base has implicitly changed it, leave it.
		 */
		if ((value == GS_TLS_SEL && task->thread.gsindex == 0 &&
		     task->thread.gs != 0) ||
		    (value == 0 && task->thread.gsindex == GS_TLS_SEL &&
		     task->thread.gs == 0))
			break;
		task->thread.gsindex = value;
		if (task == current)
			load_gs_index(task->thread.gsindex);
		break;
	case offsetof(struct user_regs_struct,ds):
		task->thread.ds = value;
		if (task == current)
			loadsegment(ds, task->thread.ds);
		break;
	case offsetof(struct user_regs_struct,es):
		task->thread.es = value;
		if (task == current)
			loadsegment(es, task->thread.es);
		break;

		/*
		 * Can't actually change these in 64-bit mode.
		 */
	case offsetof(struct user_regs_struct,cs):
		if (unlikely(value == 0))
			return -EIO;
#ifdef CONFIG_IA32_EMULATION
		if (test_tsk_thread_flag(task, TIF_IA32))
			task_pt_regs(task)->cs = value;
#endif
		break;
	case offsetof(struct user_regs_struct,ss):
		if (unlikely(value == 0))
			return -EIO;
#ifdef CONFIG_IA32_EMULATION
		if (test_tsk_thread_flag(task, TIF_IA32))
			task_pt_regs(task)->ss = value;
#endif
		break;
	}

	return 0;
}

#endif	/* CONFIG_X86_32 */

static unsigned long get_flags(struct task_struct *task)
{
	unsigned long retval = task_pt_regs(task)->flags;

	/*
	 * If the debugger set TF, hide it from the readout.
	 */
	if (test_tsk_thread_flag(task, TIF_FORCED_TF))
		retval &= ~X86_EFLAGS_TF;

	return retval;
}

static int set_flags(struct task_struct *task, unsigned long value)
{
	struct pt_regs *regs = task_pt_regs(task);

	/*
	 * If the user value contains TF, mark that
	 * it was not "us" (the debugger) that set it.
	 * If not, make sure it stays set if we had.
	 */
	if (value & X86_EFLAGS_TF)
		clear_tsk_thread_flag(task, TIF_FORCED_TF);
	else if (test_tsk_thread_flag(task, TIF_FORCED_TF))
		value |= X86_EFLAGS_TF;

	regs->flags = (regs->flags & ~FLAG_MASK) | (value & FLAG_MASK);

	return 0;
}
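
/*
 * Illustrative note (not part of the original source): after
 * PTRACE_SINGLESTEP the kernel sets X86_EFLAGS_TF on the tracee's frame
 * and records TIF_FORCED_TF, so the pair above composes safely:
 *
 *	flags = get_flags(task);	// TF hidden if we forced it
 *	set_flags(task, flags);		// TF silently re-added
 *
 * A register round-trip through PTRACE_GETREGS/PTRACE_SETREGS therefore
 * cannot accidentally cancel kernel-initiated single-stepping.
 */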
static int putreg(struct task_struct *child,
		  unsigned long offset, unsigned long value)
{
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ds):
	case offsetof(struct user_regs_struct, es):
	case offsetof(struct user_regs_struct, fs):
	case offsetof(struct user_regs_struct, gs):
	case offsetof(struct user_regs_struct, ss):
		return set_segment_reg(child, offset, value);

	case offsetof(struct user_regs_struct, flags):
		return set_flags(child, value);

#ifdef CONFIG_X86_64
	/*
	 * Orig_ax is really just a flag with small positive and
	 * negative values, so make sure to always sign-extend it
	 * from 32 bits so that it works correctly regardless of
	 * whether we come from a 32-bit environment or not.
	 */
	case offsetof(struct user_regs_struct, orig_ax):
		value = (long) (s32) value;
		break;

	case offsetof(struct user_regs_struct,fs_base):
		if (value >= TASK_SIZE_OF(child))
			return -EIO;
		/*
		 * When changing the segment base, use do_arch_prctl
		 * to set either thread.fs or thread.fsindex and the
		 * corresponding GDT slot.
		 */
		if (child->thread.fs != value)
			return do_arch_prctl(child, ARCH_SET_FS, value);
		return 0;
	case offsetof(struct user_regs_struct,gs_base):
		/*
		 * Exactly the same here as the %fs handling above.
		 */
		if (value >= TASK_SIZE_OF(child))
			return -EIO;
		if (child->thread.gs != value)
			return do_arch_prctl(child, ARCH_SET_GS, value);
		return 0;
#endif
	}

	*pt_regs_access(task_pt_regs(child), offset) = value;
	return 0;
}

static unsigned long getreg(struct task_struct *task, unsigned long offset)
{
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ds):
	case offsetof(struct user_regs_struct, es):
	case offsetof(struct user_regs_struct, fs):
	case offsetof(struct user_regs_struct, gs):
	case offsetof(struct user_regs_struct, ss):
		return get_segment_reg(task, offset);

	case offsetof(struct user_regs_struct, flags):
		return get_flags(task);

#ifdef CONFIG_X86_64
	case offsetof(struct user_regs_struct, fs_base): {
		/*
		 * do_arch_prctl may have used a GDT slot instead of
		 * the MSR. To userland, it appears the same either
		 * way, except the %fs segment selector might not be 0.
		 */
		unsigned int seg = task->thread.fsindex;
		if (task->thread.fs != 0)
			return task->thread.fs;
		if (task == current)
			asm("movl %%fs,%0" : "=r" (seg));
		if (seg != FS_TLS_SEL)
			return 0;
		return get_desc_base(&task->thread.tls_array[FS_TLS]);
	}
	case offsetof(struct user_regs_struct, gs_base): {
		/*
		 * Exactly the same here as the %fs handling above.
		 */
		unsigned int seg = task->thread.gsindex;
		if (task->thread.gs != 0)
			return task->thread.gs;
		if (task == current)
			asm("movl %%gs,%0" : "=r" (seg));
		if (seg != GS_TLS_SEL)
			return 0;
		return get_desc_base(&task->thread.tls_array[GS_TLS]);
	}
#endif
	}

	return *pt_regs_access(task_pt_regs(task), offset);
}

static int genregs_get(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       void *kbuf, void __user *ubuf)
{
	if (kbuf) {
		unsigned long *k = kbuf;
		while (count > 0) {
			*k++ = getreg(target, pos);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		unsigned long __user *u = ubuf;
		while (count > 0) {
			if (__put_user(getreg(target, pos), u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	return 0;
}

static int genregs_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	int ret = 0;
	if (kbuf) {
		const unsigned long *k = kbuf;
		while (count > 0 && !ret) {
			ret = putreg(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const unsigned long __user *u = ubuf;
		while (count > 0 && !ret) {
			unsigned long word;
			ret = __get_user(word, u++);
			if (ret)
				break;
			ret = putreg(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}
	return ret;
}

/*
 * Decode the length and type bits for a particular breakpoint as
 * stored in debug register 7. Return the "enabled" status.
 */
static int decode_dr7(unsigned long dr7, int bpnum, unsigned *len,
		      unsigned *type)
{
	int bp_info = dr7 >> (DR_CONTROL_SHIFT + bpnum * DR_CONTROL_SIZE);

	*len = (bp_info & 0xc) | 0x40;
	*type = (bp_info & 0x3) | 0x80;
	return (dr7 >> (bpnum * DR_ENABLE_SIZE)) & 0x3;
}
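
/*
 * Worked example (illustrative, not part of the original source),
 * assuming DR_CONTROL_SHIFT == 16, DR_CONTROL_SIZE == 4 and
 * DR_ENABLE_SIZE == 2 from <asm/debugreg.h>. For dr7 == 0x000d0001
 * and bpnum == 0:
 *
 *	bp_info = 0x000d0001 >> 16   = 0xd
 *	*len    = (0xd & 0xc) | 0x40 = 0x4c	(4-byte breakpoint)
 *	*type   = (0xd & 0x3) | 0x80 = 0x81	(data write)
 *	return    0x000d0001 & 0x3   = 1	(locally enabled)
 *
 * The OR'ed 0x40/0x80 bits fold the raw DR7 fields into the encoded
 * len/type values consumed by the hw_breakpoint layer.
 */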
static void ptrace_triggered(struct hw_breakpoint *bp, struct pt_regs *regs)
{
	struct thread_struct *thread = &(current->thread);
	int i;

	/*
	 * Store in the virtual DR6 register the fact that the breakpoint
	 * was hit so the thread's debugger will see it.
	 */
	for (i = 0; i < hbp_kernel_pos; i++)
		/*
		 * We will check bp->info.address against the address stored
		 * in thread's hbp structure and not debugreg[i]. This is to
		 * ensure that the corresponding bit for 'i' in DR7 register
		 * is enabled
		 */
		if (bp->info.address == thread->hbp[i]->info.address)
			break;

	thread->debugreg6 |= (DR_TRAP0 << i);
}

/*
 * Handle ptrace writes to debug register 7.
 */
static int ptrace_write_dr7(struct task_struct *tsk, unsigned long data)
{
	struct thread_struct *thread = &(tsk->thread);
	unsigned long old_dr7 = thread->debugreg7;
	int i, orig_ret = 0, rc = 0;
	int enabled, second_pass = 0;
	unsigned len, type;
	struct hw_breakpoint *bp;

	data &= ~DR_CONTROL_RESERVED;
restore:
	/*
	 * Loop through all the hardware breakpoints, making the
	 * appropriate changes to each.
	 */
	for (i = 0; i < HBP_NUM; i++) {
		enabled = decode_dr7(data, i, &len, &type);
		bp = thread->hbp[i];

		if (!enabled) {
			if (bp) {
				/* Don't unregister the breakpoints right-away,
				 * unless all register_user_hw_breakpoint()
				 * requests have succeeded. This prevents
				 * any window of opportunity for debug
				 * register grabbing by other users.
				 */
				if (!second_pass)
					continue;
				unregister_user_hw_breakpoint(tsk, bp);
				kfree(bp);
			}
			continue;
		}
		if (!bp) {
			rc = -ENOMEM;
			bp = kzalloc(sizeof(struct hw_breakpoint), GFP_KERNEL);
			if (bp) {
				bp->info.address = thread->debugreg[i];
				bp->triggered = ptrace_triggered;
				bp->info.len = len;
				bp->info.type = type;
				rc = register_user_hw_breakpoint(tsk, bp);
				if (rc)
					kfree(bp);
			}
		} else
			rc = modify_user_hw_breakpoint(tsk, bp);
		if (rc)
			break;
	}
	/*
	 * Make a second pass to free the remaining unused breakpoints
	 * or to restore the original breakpoints if an error occurred.
	 */
	if (!second_pass) {
		second_pass = 1;
		if (rc < 0) {
			orig_ret = rc;
			data = old_dr7;
		}
		goto restore;
	}
	return ((orig_ret < 0) ? orig_ret : rc);
}

/*
 * Handle PTRACE_PEEKUSR calls for the debug register area.
 */
unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n)
{
	struct thread_struct *thread = &(tsk->thread);
	unsigned long val = 0;

	if (n < HBP_NUM)
		val = thread->debugreg[n];
	else if (n == 6)
		val = thread->debugreg6;
	else if (n == 7)
		val = thread->debugreg7;
	return val;
}

/*
 * Handle PTRACE_POKEUSR calls for the debug register area.
 */
int ptrace_set_debugreg(struct task_struct *tsk, int n, unsigned long val)
{
	struct thread_struct *thread = &(tsk->thread);
	int rc = 0;

	/* There are no DR4 or DR5 registers */
	if (n == 4 || n == 5)
		return -EIO;

	if (n == 6) {
		tsk->thread.debugreg6 = val;
		goto ret_path;
	}
	if (n < HBP_NUM) {
		if (thread->hbp[n]) {
			if (arch_check_va_in_userspace(val,
					thread->hbp[n]->info.len) == 0) {
				rc = -EIO;
				goto ret_path;
			}
			thread->hbp[n]->info.address = val;
		}
		thread->debugreg[n] = val;
	}
	/* All that's left is DR7 */
	if (n == 7)
		rc = ptrace_write_dr7(tsk, val);

ret_path:
	return rc;
}
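
/*
 * Illustrative tracer-side usage (user space, not part of this file;
 * pid and addr are placeholders): setting a 4-byte write watchpoint in
 * slot 0 means poking the address into u_debugreg[0] and then enabling
 * slot 0 in u_debugreg[7], which lands in ptrace_write_dr7() above:
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/user.h>
 *	#include <stddef.h>
 *
 *	ptrace(PTRACE_POKEUSER, pid,
 *	       offsetof(struct user, u_debugreg[0]), addr);
 *	// L0 local enable; R/W0 = 0b01 (write), LEN0 = 0b11 (4 bytes)
 *	ptrace(PTRACE_POKEUSER, pid,
 *	       offsetof(struct user, u_debugreg[7]),
 *	       0x1UL | (0xdUL << 16));
 */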
/*
 * These access the current or another (stopped) task's io permission
 * bitmap for debugging or core dump.
 */
static int ioperm_active(struct task_struct *target,
			 const struct user_regset *regset)
{
	return target->thread.io_bitmap_max / regset->size;
}

static int ioperm_get(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      void *kbuf, void __user *ubuf)
{
	if (!target->thread.io_bitmap_ptr)
		return -ENXIO;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   target->thread.io_bitmap_ptr,
				   0, IO_BITMAP_BYTES);
}

#ifdef CONFIG_X86_PTRACE_BTS
/*
 * A branch trace store context.
 *
 * Contexts may only be installed by ptrace_bts_config() and only for
 * ptraced tasks.
 *
 * Contexts are destroyed when the tracee is detached from the tracer.
 * The actual destruction work requires interrupts enabled, so the
 * work is deferred and will be scheduled during __ptrace_unlink().
 *
 * Contexts hold an additional task_struct reference on the traced
 * task, as well as a reference on the tracer's mm.
 *
 * Ptrace already holds a task_struct for the duration of ptrace operations,
 * but since destruction is deferred, it may be executed after both
 * tracer and tracee exited.
 */
struct bts_context {
	/* The branch trace handle. */
	struct bts_tracer	*tracer;

	/* The buffer used to store the branch trace and its size. */
	void			*buffer;
	unsigned int		size;

	/* The mm that paid for the above buffer. */
	struct mm_struct	*mm;

	/* The task this context belongs to. */
	struct task_struct	*task;

	/* The signal to send on a bts buffer overflow. */
	unsigned int		bts_ovfl_signal;

	/* The work struct to destroy a context. */
	struct work_struct	work;
};

static int alloc_bts_buffer(struct bts_context *context, unsigned int size)
{
	void *buffer = NULL;
	int err = -ENOMEM;

	err = account_locked_memory(current->mm, current->signal->rlim, size);
	if (err < 0)
		return err;

	buffer = kzalloc(size, GFP_KERNEL);
	if (!buffer)
		goto out_refund;

	context->buffer = buffer;
	context->size = size;
	context->mm = get_task_mm(current);

	return 0;

out_refund:
	refund_locked_memory(current->mm, size);
	return err;
}

static inline void free_bts_buffer(struct bts_context *context)
{
	if (!context->buffer)
		return;

	kfree(context->buffer);
	context->buffer = NULL;

	refund_locked_memory(context->mm, context->size);
	context->size = 0;

	mmput(context->mm);
	context->mm = NULL;
}

static void free_bts_context_work(struct work_struct *w)
{
	struct bts_context *context;

	context = container_of(w, struct bts_context, work);

	ds_release_bts(context->tracer);
	put_task_struct(context->task);
	free_bts_buffer(context);
	kfree(context);
}

static inline void free_bts_context(struct bts_context *context)
{
	INIT_WORK(&context->work, free_bts_context_work);
	schedule_work(&context->work);
}

static inline struct bts_context *alloc_bts_context(struct task_struct *task)
{
	struct bts_context *context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (context) {
		context->task = task;
		task->bts = context;

		get_task_struct(task);
	}

	return context;
}

static int ptrace_bts_read_record(struct task_struct *child, size_t index,
				  struct bts_struct __user *out)
{
	struct bts_context *context;
	const struct bts_trace *trace;
	struct bts_struct bts;
	const unsigned char *at;
	int error;

	context = child->bts;
	if (!context)
		return -ESRCH;

	trace = ds_read_bts(context->tracer);
	if (!trace)
		return -ESRCH;

	at = trace->ds.top - ((index + 1) * trace->ds.size);
	if ((void *)at < trace->ds.begin)
		at += (trace->ds.n * trace->ds.size);

	if (!trace->read)
		return -EOPNOTSUPP;

	error = trace->read(context->tracer, at, &bts);
	if (error < 0)
		return error;

	if (copy_to_user(out, &bts, sizeof(bts)))
		return -EFAULT;

	return sizeof(bts);
}

static int ptrace_bts_drain(struct task_struct *child,
			    long size,
			    struct bts_struct __user *out)
{
	struct bts_context *context;
	const struct bts_trace *trace;
	const unsigned char *at;
	int error, drained = 0;

	context = child->bts;
	if (!context)
		return -ESRCH;

	trace = ds_read_bts(context->tracer);
	if (!trace)
		return -ESRCH;

	if (!trace->read)
		return -EOPNOTSUPP;

	if (size < (trace->ds.top - trace->ds.begin))
		return -EIO;

	for (at = trace->ds.begin; (void *)at < trace->ds.top;
	     out++, drained++, at += trace->ds.size) {
		struct bts_struct bts;

		error = trace->read(context->tracer, at, &bts);
		if (error < 0)
			return error;

		if (copy_to_user(out, &bts, sizeof(bts)))
			return -EFAULT;
	}

	memset(trace->ds.begin, 0, trace->ds.n * trace->ds.size);

	error = ds_reset_bts(context->tracer);
	if (error < 0)
		return error;

	return drained;
}

static int ptrace_bts_config(struct task_struct *child,
			     long cfg_size,
			     const struct ptrace_bts_config __user *ucfg)
{
	struct bts_context *context;
	struct ptrace_bts_config cfg;
	unsigned int flags = 0;

	if (cfg_size < sizeof(cfg))
		return -EIO;

	if (copy_from_user(&cfg, ucfg, sizeof(cfg)))
		return -EFAULT;

	context = child->bts;
	if (!context)
		context = alloc_bts_context(child);
	if (!context)
		return -ENOMEM;

	if (cfg.flags & PTRACE_BTS_O_SIGNAL) {
		if (!cfg.signal)
			return -EINVAL;

		return -EOPNOTSUPP;
		context->bts_ovfl_signal = cfg.signal;
	}

	ds_release_bts(context->tracer);
	context->tracer = NULL;

	if ((cfg.flags & PTRACE_BTS_O_ALLOC) && (cfg.size != context->size)) {
		int err;

		free_bts_buffer(context);
		if (!cfg.size)
			return 0;

		err = alloc_bts_buffer(context, cfg.size);
		if (err < 0)
			return err;
	}

	if (cfg.flags & PTRACE_BTS_O_TRACE)
		flags |= BTS_USER;

	if (cfg.flags & PTRACE_BTS_O_SCHED)
		flags |= BTS_TIMESTAMPS;

	context->tracer =
		ds_request_bts_task(child, context->buffer, context->size,
				    NULL, (size_t)-1, flags);
	if (unlikely(IS_ERR(context->tracer))) {
		int error = PTR_ERR(context->tracer);

		free_bts_buffer(context);
		context->tracer = NULL;
		return error;
	}

	return sizeof(cfg);
}
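
/*
 * Illustrative tracer-side usage (user space, not part of this file;
 * pid is a placeholder), assuming the PTRACE_BTS_* requests of this
 * tree: allocate a trace buffer and enable branch tracing for a stopped
 * tracee. Note that arch_ptrace() below passes the config pointer in
 * 'addr' and its size in 'data':
 *
 *	struct ptrace_bts_config cfg = {
 *		.size  = 4096,
 *		.flags = PTRACE_BTS_O_ALLOC | PTRACE_BTS_O_TRACE,
 *	};
 *	ptrace(PTRACE_BTS_CONFIG, pid, &cfg, sizeof(cfg));
 *
 * Requesting PTRACE_BTS_O_SIGNAL fails with -EOPNOTSUPP: overflow
 * signals are not supported, which is why the assignment after the
 * early return above is unreachable.
 */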
static int ptrace_bts_status(struct task_struct *child,
			     long cfg_size,
			     struct ptrace_bts_config __user *ucfg)
{
	struct bts_context *context;
	const struct bts_trace *trace;
	struct ptrace_bts_config cfg;

	context = child->bts;
	if (!context)
		return -ESRCH;

	if (cfg_size < sizeof(cfg))
		return -EIO;

	trace = ds_read_bts(context->tracer);
	if (!trace)
		return -ESRCH;

	memset(&cfg, 0, sizeof(cfg));
	cfg.size	= trace->ds.end - trace->ds.begin;
	cfg.signal	= context->bts_ovfl_signal;
	cfg.bts_size	= sizeof(struct bts_struct);

	if (cfg.signal)
		cfg.flags |= PTRACE_BTS_O_SIGNAL;

	if (trace->ds.flags & BTS_USER)
		cfg.flags |= PTRACE_BTS_O_TRACE;

	if (trace->ds.flags & BTS_TIMESTAMPS)
		cfg.flags |= PTRACE_BTS_O_SCHED;

	if (copy_to_user(ucfg, &cfg, sizeof(cfg)))
		return -EFAULT;

	return sizeof(cfg);
}

static int ptrace_bts_clear(struct task_struct *child)
{
	struct bts_context *context;
	const struct bts_trace *trace;

	context = child->bts;
	if (!context)
		return -ESRCH;

	trace = ds_read_bts(context->tracer);
	if (!trace)
		return -ESRCH;

	memset(trace->ds.begin, 0, trace->ds.n * trace->ds.size);

	return ds_reset_bts(context->tracer);
}

static int ptrace_bts_size(struct task_struct *child)
{
	struct bts_context *context;
	const struct bts_trace *trace;

	context = child->bts;
	if (!context)
		return -ESRCH;

	trace = ds_read_bts(context->tracer);
	if (!trace)
		return -ESRCH;

	return (trace->ds.top - trace->ds.begin) / trace->ds.size;
}

/*
 * Called from __ptrace_unlink() after the child has been moved back
 * to its original parent.
 */
void ptrace_bts_untrace(struct task_struct *child)
{
	if (unlikely(child->bts)) {
		free_bts_context(child->bts);
		child->bts = NULL;
	}
}
#endif /* CONFIG_X86_PTRACE_BTS */

/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure the single step bit is not set.
 */
void ptrace_disable(struct task_struct *child)
{
	user_disable_single_step(child);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif
}

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
static const struct user_regset_view user_x86_32_view; /* Initialized below. */
#endif

long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
	int ret;
	unsigned long __user *datap = (unsigned long __user *)data;

	switch (request) {
	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long tmp;

		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr < 0 ||
		    addr >= sizeof(struct user))
			break;

		tmp = 0;  /* Default return condition */
		if (addr < sizeof(struct user_regs_struct))
			tmp = getreg(child, addr);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			tmp = ptrace_get_debugreg(child, addr / sizeof(data));
		}
		ret = put_user(tmp, datap);
		break;
	}

	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr < 0 ||
		    addr >= sizeof(struct user))
			break;

		if (addr < sizeof(struct user_regs_struct))
			ret = putreg(child, addr, data);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			ret = ptrace_set_debugreg(child,
						  addr / sizeof(data), data);
		}
		break;

	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_GENERAL,
					   0, sizeof(struct user_regs_struct),
					   datap);

	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_GENERAL,
					     0, sizeof(struct user_regs_struct),
					     datap);

	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_FP,
					   0, sizeof(struct user_i387_struct),
					   datap);

	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_FP,
					     0, sizeof(struct user_i387_struct),
					     datap);

#ifdef CONFIG_X86_32
	case PTRACE_GETFPXREGS:	/* Get the child extended FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_XFP,
					   0, sizeof(struct user_fxsr_struct),
					   datap) ? -EIO : 0;

	case PTRACE_SETFPXREGS:	/* Set the child extended FPU state. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_XFP,
					     0, sizeof(struct user_fxsr_struct),
					     datap) ? -EIO : 0;
#endif

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	case PTRACE_GET_THREAD_AREA:
		if (addr < 0)
			return -EIO;
		ret = do_get_thread_area(child, addr,
					 (struct user_desc __user *) data);
		break;

	case PTRACE_SET_THREAD_AREA:
		if (addr < 0)
			return -EIO;
		ret = do_set_thread_area(child, addr,
					 (struct user_desc __user *) data, 0);
		break;
#endif

#ifdef CONFIG_X86_64
		/* normal 64bit interface to access TLS data.
		   Works just like arch_prctl, except that the arguments
		   are reversed. */
	case PTRACE_ARCH_PRCTL:
		ret = do_arch_prctl(child, data, addr);
		break;
#endif

	/*
	 * These bits need more cooking - not enabled yet:
	 */
#ifdef CONFIG_X86_PTRACE_BTS
	case PTRACE_BTS_CONFIG:
		ret = ptrace_bts_config
			(child, data, (struct ptrace_bts_config __user *)addr);
		break;

	case PTRACE_BTS_STATUS:
		ret = ptrace_bts_status
			(child, data, (struct ptrace_bts_config __user *)addr);
		break;

	case PTRACE_BTS_SIZE:
		ret = ptrace_bts_size(child);
		break;

	case PTRACE_BTS_GET:
		ret = ptrace_bts_read_record
			(child, data, (struct bts_struct __user *) addr);
		break;

	case PTRACE_BTS_CLEAR:
		ret = ptrace_bts_clear(child);
		break;

	case PTRACE_BTS_DRAIN:
		ret = ptrace_bts_drain
			(child, data, (struct bts_struct __user *) addr);
		break;
#endif /* CONFIG_X86_PTRACE_BTS */

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
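
/*
 * Illustrative tracer-side usage (user space, not part of this file;
 * pid is a placeholder): a PTRACE_PEEKUSR at an offset below
 * sizeof(struct user_regs_struct) lands in getreg() above, e.g. reading
 * a stopped x86_64 tracee's instruction pointer:
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/user.h>
 *	#include <stddef.h>
 *	#include <errno.h>
 *
 *	errno = 0;
 *	long rip = ptrace(PTRACE_PEEKUSER, pid,
 *			  offsetof(struct user, regs.rip), NULL);
 *	if (rip == -1 && errno)
 *		perror("PTRACE_PEEKUSER");
 */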
#ifdef CONFIG_IA32_EMULATION

#include <linux/compat.h>
#include <linux/syscalls.h>
#include <asm/ia32.h>
#include <asm/user32.h>

#define R32(l,q)							\
	case offsetof(struct user32, regs.l):				\
		regs->q = value; break

#define SEG32(rs)							\
	case offsetof(struct user32, regs.rs):				\
		return set_segment_reg(child,				\
				       offsetof(struct user_regs_struct, rs), \
				       value);				\
		break

static int putreg32(struct task_struct *child, unsigned regno, u32 value)
{
	struct pt_regs *regs = task_pt_regs(child);

	switch (regno) {

	SEG32(cs);
	SEG32(ds);
	SEG32(es);
	SEG32(fs);
	SEG32(gs);
	SEG32(ss);

	R32(ebx, bx);
	R32(ecx, cx);
	R32(edx, dx);
	R32(edi, di);
	R32(esi, si);
	R32(ebp, bp);
	R32(eax, ax);
	R32(eip, ip);
	R32(esp, sp);

	case offsetof(struct user32, regs.orig_eax):
		/*
		 * Sign-extend the value so that orig_eax = -1
		 * causes (long)orig_ax < 0 tests to fire correctly.
		 */
		regs->orig_ax = (long) (s32) value;
		break;

	case offsetof(struct user32, regs.eflags):
		return set_flags(child, value);

	case offsetof(struct user32, u_debugreg[0]) ...
		offsetof(struct user32, u_debugreg[7]):
		regno -= offsetof(struct user32, u_debugreg[0]);
		return ptrace_set_debugreg(child, regno / 4, value);

	default:
		if (regno > sizeof(struct user32) || (regno & 3))
			return -EIO;

		/*
		 * Other dummy fields in the virtual user structure
		 * are ignored
		 */
		break;
	}
	return 0;
}

#undef R32
#undef SEG32

#define R32(l,q)							\
	case offsetof(struct user32, regs.l):				\
		*val = regs->q; break

#define SEG32(rs)							\
	case offsetof(struct user32, regs.rs):				\
		*val = get_segment_reg(child,				\
				       offsetof(struct user_regs_struct, rs)); \
		break

static int getreg32(struct task_struct *child, unsigned regno, u32 *val)
{
	struct pt_regs *regs = task_pt_regs(child);

	switch (regno) {

	SEG32(ds);
	SEG32(es);
	SEG32(fs);
	SEG32(gs);

	R32(cs, cs);
	R32(ss, ss);
	R32(ebx, bx);
	R32(ecx, cx);
	R32(edx, dx);
	R32(edi, di);
	R32(esi, si);
	R32(ebp, bp);
	R32(eax, ax);
	R32(orig_eax, orig_ax);
	R32(eip, ip);
	R32(esp, sp);

	case offsetof(struct user32, regs.eflags):
		*val = get_flags(child);
		break;

	case offsetof(struct user32, u_debugreg[0]) ...
		offsetof(struct user32, u_debugreg[7]):
		regno -= offsetof(struct user32, u_debugreg[0]);
		*val = ptrace_get_debugreg(child, regno / 4);
		break;

	default:
		if (regno > sizeof(struct user32) || (regno & 3))
			return -EIO;

		/*
		 * Other dummy fields in the virtual user structure
		 * are ignored
		 */
		*val = 0;
		break;
	}
	return 0;
}

#undef R32
#undef SEG32

static int genregs32_get(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 void *kbuf, void __user *ubuf)
{
	if (kbuf) {
		compat_ulong_t *k = kbuf;
		while (count > 0) {
			getreg32(target, pos, k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		compat_ulong_t __user *u = ubuf;
		while (count > 0) {
			compat_ulong_t word;
			getreg32(target, pos, &word);
			if (__put_user(word, u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	return 0;
}

static int genregs32_set(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 const void *kbuf, const void __user *ubuf)
{
	int ret = 0;
	if (kbuf) {
		const compat_ulong_t *k = kbuf;
		while (count > 0 && !ret) {
			ret = putreg32(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const compat_ulong_t __user *u = ubuf;
		while (count > 0 && !ret) {
			compat_ulong_t word;
			ret = __get_user(word, u++);
			if (ret)
				break;
			ret = putreg32(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}
	return ret;
}

long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;
	__u32 val;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = getreg32(child, addr, &val);
		if (ret == 0)
			ret = put_user(val, (__u32 __user *)datap);
		break;

	case PTRACE_POKEUSR:
		ret = putreg32(child, addr, data);
		break;

	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_GENERAL,
					   0, sizeof(struct user_regs_struct32),
					   datap);

	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_GENERAL, 0,
					     sizeof(struct user_regs_struct32),
					     datap);

	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_FP, 0,
					   sizeof(struct user_i387_ia32_struct),
					   datap);

	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
		return copy_regset_from_user(
			child, &user_x86_32_view, REGSET_FP,
			0, sizeof(struct user_i387_ia32_struct), datap);

	case PTRACE_GETFPXREGS:	/* Get the child extended FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_XFP, 0,
					   sizeof(struct user32_fxsr_struct),
					   datap);

	case PTRACE_SETFPXREGS:	/* Set the child extended FPU state. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_XFP, 0,
					     sizeof(struct user32_fxsr_struct),
					     datap);

	case PTRACE_GET_THREAD_AREA:
	case PTRACE_SET_THREAD_AREA:
#ifdef CONFIG_X86_PTRACE_BTS
	case PTRACE_BTS_CONFIG:
	case PTRACE_BTS_STATUS:
	case PTRACE_BTS_SIZE:
	case PTRACE_BTS_GET:
	case PTRACE_BTS_CLEAR:
	case PTRACE_BTS_DRAIN:
#endif /* CONFIG_X86_PTRACE_BTS */
		return arch_ptrace(child, request, addr, data);

	default:
		return compat_ptrace_request(child, request, addr, data);
	}

	return ret;
}

#endif	/* CONFIG_IA32_EMULATION */

#ifdef CONFIG_X86_64

static const struct user_regset x86_64_regsets[] = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_regs_struct) / sizeof(long),
		.size = sizeof(long), .align = sizeof(long),
		.get = genregs_get, .set = genregs_set
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_i387_struct) / sizeof(long),
		.size = sizeof(long), .align = sizeof(long),
		.active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set
	},
	[REGSET_IOPERM64] = {
		.core_note_type = NT_386_IOPERM,
		.n = IO_BITMAP_LONGS,
		.size = sizeof(long), .align = sizeof(long),
		.active = ioperm_active, .get = ioperm_get
	},
};

static const struct user_regset_view user_x86_64_view = {
	.name = "x86_64", .e_machine = EM_X86_64,
	.regsets = x86_64_regsets, .n = ARRAY_SIZE(x86_64_regsets)
};

#else  /* CONFIG_X86_32 */

#define user_regs_struct32	user_regs_struct
#define genregs32_get		genregs_get
#define genregs32_set		genregs_set

#define user_i387_ia32_struct	user_i387_struct
#define user32_fxsr_struct	user_fxsr_struct

#endif	/* CONFIG_X86_64 */

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
static const struct user_regset x86_32_regsets[] = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_regs_struct32) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.get = genregs32_get, .set = genregs32_set
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_i387_ia32_struct) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = fpregs_active, .get = fpregs_get, .set = fpregs_set
	},
	[REGSET_XFP] = {
		.core_note_type = NT_PRXFPREG,
		.n = sizeof(struct user32_fxsr_struct) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_386_TLS,
		.n = GDT_ENTRY_TLS_ENTRIES, .bias = GDT_ENTRY_TLS_MIN,
		.size = sizeof(struct user_desc),
		.align = sizeof(struct user_desc),
		.active = regset_tls_active,
		.get = regset_tls_get, .set = regset_tls_set
	},
	[REGSET_IOPERM32] = {
		.core_note_type = NT_386_IOPERM,
		.n = IO_BITMAP_BYTES / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = ioperm_active, .get = ioperm_get
	},
};

static const struct user_regset_view user_x86_32_view = {
	.name = "i386", .e_machine = EM_386,
	.regsets = x86_32_regsets, .n = ARRAY_SIZE(x86_32_regsets)
};
#endif

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(task, TIF_IA32))
#endif
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
		return &user_x86_32_view;
#endif
#ifdef CONFIG_X86_64
	return &user_x86_64_view;
#endif
}

void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
		  int error_code, int si_code)
{
	struct siginfo info;

	tsk->thread.trap_no = 1;
	tsk->thread.error_code = error_code;

	memset(&info, 0, sizeof(info));
	info.si_signo = SIGTRAP;
	info.si_code = si_code;

	/* User-mode ip? */
	info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;

	/* Send us the fake SIGTRAP */
	force_sig_info(SIGTRAP, &info, tsk);
}

#ifdef CONFIG_X86_32
# define IS_IA32	1
#elif defined CONFIG_IA32_EMULATION
# define IS_IA32	is_compat_task()
#else
# define IS_IA32	0
#endif

/*
 * We must return the syscall number to actually look up in the table.
 * This can be -1L to skip running any syscall at all.
 */
asmregparm long syscall_trace_enter(struct pt_regs *regs)
{
	long ret = 0;

	/*
	 * If we stepped into a sysenter/syscall insn, it trapped in
	 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
	 * If user-mode had set TF itself, then it's still clear from
	 * do_debug() and we need to set it again to restore the user
	 * state. If we entered on the slow path, TF was already set.
	 */
	if (test_thread_flag(TIF_SINGLESTEP))
		regs->flags |= X86_EFLAGS_TF;

	/* do the secure computing check first */
	secure_computing(regs->orig_ax);

	if (unlikely(test_thread_flag(TIF_SYSCALL_EMU)))
		ret = -1L;

	if ((ret || test_thread_flag(TIF_SYSCALL_TRACE)) &&
	    tracehook_report_syscall_entry(regs))
		ret = -1L;

	if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE)))
		ftrace_syscall_enter(regs);

	if (unlikely(current->audit_context)) {
		if (IS_IA32)
			audit_syscall_entry(AUDIT_ARCH_I386,
					    regs->orig_ax,
					    regs->bx, regs->cx,
					    regs->dx, regs->si);
#ifdef CONFIG_X86_64
		else
			audit_syscall_entry(AUDIT_ARCH_X86_64,
					    regs->orig_ax,
					    regs->di, regs->si,
					    regs->dx, regs->r10);
#endif
	}

	return ret ?: regs->orig_ax;
}
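
/*
 * Illustrative tracer-side usage (user space, not part of this file;
 * pid is a placeholder): a tracer that resumes the child with
 * PTRACE_SYSCALL stops it both in syscall_trace_enter() above and in
 * syscall_trace_leave() below, and can read the pending syscall number
 * from orig_rax at either stop:
 *
 *	ptrace(PTRACE_SYSCALL, pid, NULL, NULL);
 *	waitpid(pid, &status, 0);
 *	long nr = ptrace(PTRACE_PEEKUSER, pid,
 *			 offsetof(struct user, regs.orig_rax), NULL);
 *
 * Under PTRACE_SYSEMU (TIF_SYSCALL_EMU), syscall_trace_enter() returns
 * -1L and the syscall itself is skipped.
 */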
asmregparm void syscall_trace_leave(struct pt_regs *regs)
{
	if (unlikely(current->audit_context))
		audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);

	if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE)))
		ftrace_syscall_exit(regs);

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, 0);

	/*
	 * If TIF_SYSCALL_EMU is set, we only get here because of
	 * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
	 * We already reported this syscall instruction in
	 * syscall_trace_enter(), so don't do any more now.
	 */
	if (unlikely(test_thread_flag(TIF_SYSCALL_EMU)))
		return;

	/*
	 * If we are single-stepping, synthesize a trap to follow the
	 * system call instruction.
	 */
	if (test_thread_flag(TIF_SINGLESTEP) &&
	    tracehook_consider_fatal_signal(current, SIGTRAP))
		send_sigtrap(current, regs, 0, TRAP_BRKPT);
}