ptrace.c

/* By Ross Biro 1/23/92 */
/*
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * BTS tracing
 *	Markus Metzger <markus.t.metzger@intel.com>, Dec 2007
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/debugreg.h>
#include <asm/ldt.h>
#include <asm/desc.h>
#include <asm/prctl.h>
#include <asm/proto.h>
#include <asm/ds.h>

#include "tls.h"

enum x86_regset {
	REGSET_GENERAL,
	REGSET_FP,
	REGSET_XFP,
	REGSET_IOPERM64 = REGSET_XFP,
	REGSET_TLS,
	REGSET_IOPERM32,
};

/*
 * does not yet catch signals sent when the child dies.
 * in exit.c or in signal.c.
 */

/*
 * Determines which flags the user has access to [1 = access, 0 = no access].
 */
#define FLAG_MASK_32		((unsigned long)			\
				 (X86_EFLAGS_CF | X86_EFLAGS_PF |	\
				  X86_EFLAGS_AF | X86_EFLAGS_ZF |	\
				  X86_EFLAGS_SF | X86_EFLAGS_TF |	\
				  X86_EFLAGS_DF | X86_EFLAGS_OF |	\
				  X86_EFLAGS_RF | X86_EFLAGS_AC))

/*
 * Determines whether a value may be installed in a segment register.
 */
static inline bool invalid_selector(u16 value)
{
	return unlikely(value != 0 && (value & SEGMENT_RPL_MASK) != USER_RPL);
}
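
/*
 * For illustration (hypothetical example values, not used below):
 * invalid_selector(0) is false, since the null selector is always
 * allowed; invalid_selector(0x2b) is false, since its RPL field is
 * 3 == USER_RPL; invalid_selector(0x10) is true, since it is a
 * non-null selector with RPL 0.
 */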
#ifdef CONFIG_X86_32

#define FLAG_MASK		FLAG_MASK_32

static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
{
	BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
	regno >>= 2;
	if (regno > FS)
		--regno;
	return &regs->bx + regno;
}

static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
	/*
	 * Returning the value truncates it to 16 bits.
	 */
	unsigned int retval;
	if (offset != offsetof(struct user_regs_struct, gs))
		retval = *pt_regs_access(task_pt_regs(task), offset);
	else {
		retval = task->thread.gs;
		if (task == current)
			savesegment(gs, retval);
	}
	return retval;
}

static int set_segment_reg(struct task_struct *task,
			   unsigned long offset, u16 value)
{
	/*
	 * The value argument was already truncated to 16 bits.
	 */
	if (invalid_selector(value))
		return -EIO;

	/*
	 * For %cs and %ss we cannot permit a null selector.
	 * We can permit a bogus selector as long as it has USER_RPL.
	 * Null selectors are fine for other segment registers, but
	 * we will never get back to user mode with invalid %cs or %ss
	 * and will take the trap in iret instead.  Much code relies
	 * on user_mode() to distinguish a user trap frame (which can
	 * safely use invalid selectors) from a kernel trap frame.
	 */
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ss):
		if (unlikely(value == 0))
			return -EIO;

	default:
		*pt_regs_access(task_pt_regs(task), offset) = value;
		break;

	case offsetof(struct user_regs_struct, gs):
		task->thread.gs = value;
		if (task == current)
			/*
			 * The user-mode %gs is not affected by
			 * kernel entry, so we must update the CPU.
			 */
			loadsegment(gs, value);
	}

	return 0;
}

static unsigned long debugreg_addr_limit(struct task_struct *task)
{
	return TASK_SIZE - 3;
}
#else  /* CONFIG_X86_64 */

#define FLAG_MASK		(FLAG_MASK_32 | X86_EFLAGS_NT)

static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long offset)
{
	BUILD_BUG_ON(offsetof(struct pt_regs, r15) != 0);
	return &regs->r15 + (offset / sizeof(regs->r15));
}

static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
	/*
	 * Returning the value truncates it to 16 bits.
	 */
	unsigned int seg;

	switch (offset) {
	case offsetof(struct user_regs_struct, fs):
		if (task == current) {
			/* Older gas can't assemble movq %?s,%r?? */
			asm("movl %%fs,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.fsindex;
	case offsetof(struct user_regs_struct, gs):
		if (task == current) {
			asm("movl %%gs,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.gsindex;
	case offsetof(struct user_regs_struct, ds):
		if (task == current) {
			asm("movl %%ds,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.ds;
	case offsetof(struct user_regs_struct, es):
		if (task == current) {
			asm("movl %%es,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.es;

	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ss):
		break;
	}
	return *pt_regs_access(task_pt_regs(task), offset);
}

static int set_segment_reg(struct task_struct *task,
			   unsigned long offset, u16 value)
{
	/*
	 * The value argument was already truncated to 16 bits.
	 */
	if (invalid_selector(value))
		return -EIO;

	switch (offset) {
	case offsetof(struct user_regs_struct,fs):
		/*
		 * If this is setting fs as for normal 64-bit use but
		 * setting fs_base has implicitly changed it, leave it.
		 */
		if ((value == FS_TLS_SEL && task->thread.fsindex == 0 &&
		     task->thread.fs != 0) ||
		    (value == 0 && task->thread.fsindex == FS_TLS_SEL &&
		     task->thread.fs == 0))
			break;
		task->thread.fsindex = value;
		if (task == current)
			loadsegment(fs, task->thread.fsindex);
		break;
	case offsetof(struct user_regs_struct,gs):
		/*
		 * If this is setting gs as for normal 64-bit use but
		 * setting gs_base has implicitly changed it, leave it.
		 */
		if ((value == GS_TLS_SEL && task->thread.gsindex == 0 &&
		     task->thread.gs != 0) ||
		    (value == 0 && task->thread.gsindex == GS_TLS_SEL &&
		     task->thread.gs == 0))
			break;
		task->thread.gsindex = value;
		if (task == current)
			load_gs_index(task->thread.gsindex);
		break;
	case offsetof(struct user_regs_struct,ds):
		task->thread.ds = value;
		if (task == current)
			loadsegment(ds, task->thread.ds);
		break;
	case offsetof(struct user_regs_struct,es):
		task->thread.es = value;
		if (task == current)
			loadsegment(es, task->thread.es);
		break;

		/*
		 * Can't actually change these in 64-bit mode.
		 */
	case offsetof(struct user_regs_struct,cs):
		if (unlikely(value == 0))
			return -EIO;
#ifdef CONFIG_IA32_EMULATION
		if (test_tsk_thread_flag(task, TIF_IA32))
			task_pt_regs(task)->cs = value;
#endif
		break;
	case offsetof(struct user_regs_struct,ss):
		if (unlikely(value == 0))
			return -EIO;
#ifdef CONFIG_IA32_EMULATION
		if (test_tsk_thread_flag(task, TIF_IA32))
			task_pt_regs(task)->ss = value;
#endif
		break;
	}

	return 0;
}

static unsigned long debugreg_addr_limit(struct task_struct *task)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(task, TIF_IA32))
		return IA32_PAGE_OFFSET - 3;
#endif
	return TASK_SIZE64 - 7;
}

#endif	/* CONFIG_X86_32 */
static unsigned long get_flags(struct task_struct *task)
{
	unsigned long retval = task_pt_regs(task)->flags;

	/*
	 * If the debugger set TF, hide it from the readout.
	 */
	if (test_tsk_thread_flag(task, TIF_FORCED_TF))
		retval &= ~X86_EFLAGS_TF;

	return retval;
}

static int set_flags(struct task_struct *task, unsigned long value)
{
	struct pt_regs *regs = task_pt_regs(task);

	/*
	 * If the user value contains TF, mark that
	 * it was not "us" (the debugger) that set it.
	 * If not, make sure it stays set if we had.
	 */
	if (value & X86_EFLAGS_TF)
		clear_tsk_thread_flag(task, TIF_FORCED_TF);
	else if (test_tsk_thread_flag(task, TIF_FORCED_TF))
		value |= X86_EFLAGS_TF;

	regs->flags = (regs->flags & ~FLAG_MASK) | (value & FLAG_MASK);

	return 0;
}
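
/*
 * For illustration: only the bits in FLAG_MASK are taken from the
 * debugger-supplied value.  A (hypothetical) tracer writing
 * value = X86_EFLAGS_TF | X86_EFLAGS_IF | (3 << 12) here would set TF,
 * while the IF and IOPL bits keep their current values, since neither
 * is part of FLAG_MASK above.
 */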
static int putreg(struct task_struct *child,
		  unsigned long offset, unsigned long value)
{
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ds):
	case offsetof(struct user_regs_struct, es):
	case offsetof(struct user_regs_struct, fs):
	case offsetof(struct user_regs_struct, gs):
	case offsetof(struct user_regs_struct, ss):
		return set_segment_reg(child, offset, value);

	case offsetof(struct user_regs_struct, flags):
		return set_flags(child, value);

#ifdef CONFIG_X86_64
	/*
	 * Orig_ax is really just a flag with small positive and
	 * negative values, so make sure to always sign-extend it
	 * from 32 bits so that it works correctly regardless of
	 * whether we come from a 32-bit environment or not.
	 */
	case offsetof(struct user_regs_struct, orig_ax):
		value = (long) (s32) value;
		break;

	case offsetof(struct user_regs_struct,fs_base):
		if (value >= TASK_SIZE_OF(child))
			return -EIO;
		/*
		 * When changing the segment base, use do_arch_prctl
		 * to set either thread.fs or thread.fsindex and the
		 * corresponding GDT slot.
		 */
		if (child->thread.fs != value)
			return do_arch_prctl(child, ARCH_SET_FS, value);
		return 0;
	case offsetof(struct user_regs_struct,gs_base):
		/*
		 * Exactly the same here as the %fs handling above.
		 */
		if (value >= TASK_SIZE_OF(child))
			return -EIO;
		if (child->thread.gs != value)
			return do_arch_prctl(child, ARCH_SET_GS, value);
		return 0;
#endif
	}

	*pt_regs_access(task_pt_regs(child), offset) = value;
	return 0;
}
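
/*
 * For illustration: a tracer poking orig_ax = 0xffffffff (a 32-bit -1,
 * meaning "no syscall") ends up stored as -1 after the (long)(s32)
 * cast above, so later (long)orig_ax < 0 tests fire the same way they
 * would for a value written by a native 64-bit tracer.
 */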
static unsigned long getreg(struct task_struct *task, unsigned long offset)
{
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ds):
	case offsetof(struct user_regs_struct, es):
	case offsetof(struct user_regs_struct, fs):
	case offsetof(struct user_regs_struct, gs):
	case offsetof(struct user_regs_struct, ss):
		return get_segment_reg(task, offset);

	case offsetof(struct user_regs_struct, flags):
		return get_flags(task);

#ifdef CONFIG_X86_64
	case offsetof(struct user_regs_struct, fs_base): {
		/*
		 * do_arch_prctl may have used a GDT slot instead of
		 * the MSR.  To userland, it appears the same either
		 * way, except the %fs segment selector might not be 0.
		 */
		unsigned int seg = task->thread.fsindex;
		if (task->thread.fs != 0)
			return task->thread.fs;
		if (task == current)
			asm("movl %%fs,%0" : "=r" (seg));
		if (seg != FS_TLS_SEL)
			return 0;
		return get_desc_base(&task->thread.tls_array[FS_TLS]);
	}
	case offsetof(struct user_regs_struct, gs_base): {
		/*
		 * Exactly the same here as the %fs handling above.
		 */
		unsigned int seg = task->thread.gsindex;
		if (task->thread.gs != 0)
			return task->thread.gs;
		if (task == current)
			asm("movl %%gs,%0" : "=r" (seg));
		if (seg != GS_TLS_SEL)
			return 0;
		return get_desc_base(&task->thread.tls_array[GS_TLS]);
	}
#endif
	}

	return *pt_regs_access(task_pt_regs(task), offset);
}
static int genregs_get(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       void *kbuf, void __user *ubuf)
{
	if (kbuf) {
		unsigned long *k = kbuf;
		while (count > 0) {
			*k++ = getreg(target, pos);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		unsigned long __user *u = ubuf;
		while (count > 0) {
			if (__put_user(getreg(target, pos), u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	return 0;
}

static int genregs_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	int ret = 0;

	if (kbuf) {
		const unsigned long *k = kbuf;
		while (count > 0 && !ret) {
			ret = putreg(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const unsigned long __user *u = ubuf;
		while (count > 0 && !ret) {
			unsigned long word;
			ret = __get_user(word, u++);
			if (ret)
				break;
			ret = putreg(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	return ret;
}
/*
 * This function is trivial and will be inlined by the compiler.
 * Having it separates the implementation details of debug
 * registers from the interface details of ptrace.
 */
static unsigned long ptrace_get_debugreg(struct task_struct *child, int n)
{
	switch (n) {
	case 0:		return child->thread.debugreg0;
	case 1:		return child->thread.debugreg1;
	case 2:		return child->thread.debugreg2;
	case 3:		return child->thread.debugreg3;
	case 6:		return child->thread.debugreg6;
	case 7:		return child->thread.debugreg7;
	}
	return 0;
}

static int ptrace_set_debugreg(struct task_struct *child,
			       int n, unsigned long data)
{
	int i;

	if (unlikely(n == 4 || n == 5))
		return -EIO;

	if (n < 4 && unlikely(data >= debugreg_addr_limit(child)))
		return -EIO;

	switch (n) {
	case 0:		child->thread.debugreg0 = data; break;
	case 1:		child->thread.debugreg1 = data; break;
	case 2:		child->thread.debugreg2 = data; break;
	case 3:		child->thread.debugreg3 = data; break;

	case 6:
		if ((data & ~0xffffffffUL) != 0)
			return -EIO;
		child->thread.debugreg6 = data;
		break;

	case 7:
		/*
		 * Sanity-check data. Take one half-byte at once with
		 * check = (val >> (16 + 4*i)) & 0xf. It contains the
		 * R/Wi and LENi bits; bits 0 and 1 are R/Wi, and bits
		 * 2 and 3 are LENi. Given a list of invalid values,
		 * we do mask |= 1 << invalid_value, so that
		 * (mask >> check) & 1 is a correct test for invalid
		 * values.
		 *
		 * R/Wi contains the type of the breakpoint /
		 * watchpoint, LENi contains the length of the watched
		 * data in the watchpoint case.
		 *
		 * The invalid values are:
		 * - LENi == 0x10 (undefined), so mask |= 0x0f00.	[32-bit]
		 * - R/Wi == 0x10 (break on I/O reads or writes), so
		 *   mask |= 0x4444.
		 * - R/Wi == 0x00 && LENi != 0x00, so we have mask |=
		 *   0x1110.
		 *
		 * Finally, mask = 0x0f00 | 0x4444 | 0x1110 == 0x5f54.
		 *
		 * See the Intel Manual "System Programming Guide",
		 * 15.2.4
		 *
		 * Note that LENi == 0x10 is defined on x86_64 in long
		 * mode (i.e. even for 32-bit userspace software, but
		 * 64-bit kernel), so the x86_64 mask value is 0x5554.
		 * See the AMD manual no. 24593 (AMD64 System Programming)
		 */
#ifdef CONFIG_X86_32
#define	DR7_MASK	0x5f54
#else
#define	DR7_MASK	0x5554
#endif
		data &= ~DR_CONTROL_RESERVED;
		for (i = 0; i < 4; i++)
			if ((DR7_MASK >> ((data >> (16 + 4*i)) & 0xf)) & 1)
				return -EIO;
		child->thread.debugreg7 = data;
		if (data)
			set_tsk_thread_flag(child, TIF_DEBUG);
		else
			clear_tsk_thread_flag(child, TIF_DEBUG);
		break;
	}

	return 0;
}
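
/*
 * Worked example for the DR7 check above (hypothetical values):
 * writing dr7 = 0x000d0001 requests breakpoint 0, locally enabled
 * (bit 0), with R/W0 == 01 (break on data writes) and LEN0 == 11
 * (4 bytes).  The half-byte checked for i == 0 is
 * (0x000d0001 >> 16) & 0xf == 0xd, and (DR7_MASK >> 0xd) & 1 == 0,
 * so the value is accepted.  Requesting R/W0 == 10 (break on I/O)
 * instead gives a half-byte of 0x2, for which (DR7_MASK >> 0x2) & 1
 * is 1, and the request fails with -EIO.
 */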
/*
 * These access the current or another (stopped) task's io permission
 * bitmap for debugging or core dump.
 */
static int ioperm_active(struct task_struct *target,
			 const struct user_regset *regset)
{
	return target->thread.io_bitmap_max / regset->size;
}

static int ioperm_get(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      void *kbuf, void __user *ubuf)
{
	if (!target->thread.io_bitmap_ptr)
		return -ENXIO;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   target->thread.io_bitmap_ptr,
				   0, IO_BITMAP_BYTES);
}
#ifdef CONFIG_X86_PTRACE_BTS
/*
 * The configuration for a particular BTS hardware implementation.
 */
struct bts_configuration {
	/* the size of a BTS record in bytes; at most BTS_MAX_RECORD_SIZE */
	unsigned char sizeof_bts;
	/* the size of a field in the BTS record in bytes */
	unsigned char sizeof_field;
	/* a bitmask to enable/disable BTS in DEBUGCTL MSR */
	unsigned long debugctl_mask;
};
static struct bts_configuration bts_cfg;

#define BTS_MAX_RECORD_SIZE (8 * 3)

/*
 * Branch Trace Store (BTS) uses the following format. Different
 * architectures vary in the size of those fields.
 * - source linear address
 * - destination linear address
 * - flags
 *
 * Later architectures use 64bit pointers throughout, whereas earlier
 * architectures use 32bit pointers in 32bit mode.
 *
 * We compute the base address for the first 8 fields based on:
 * - the field size stored in the DS configuration
 * - the relative field position
 *
 * In order to store additional information in the BTS buffer, we use
 * a special source address to indicate that the record requires
 * special interpretation.
 *
 * Netburst indicated via a bit in the flags field whether the branch
 * was predicted; this is ignored.
 */
enum bts_field {
	bts_from = 0,
	bts_to,
	bts_flags,

	bts_escape = (unsigned long)-1,
	bts_qual = bts_to,
	bts_jiffies = bts_flags
};

static inline unsigned long bts_get(const char *base, enum bts_field field)
{
	base += (bts_cfg.sizeof_field * field);
	return *(unsigned long *)base;
}

static inline void bts_set(char *base, enum bts_field field, unsigned long val)
{
	base += (bts_cfg.sizeof_field * field);
	(*(unsigned long *)base) = val;
}
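
/*
 * For illustration, with the bts_cfg_core2 configuration defined below
 * (sizeof_field == 8, sizeof_bts == 24), a raw record is laid out as
 * three 8-byte fields:
 *
 *	offset  0: bts_from  (source linear address, or bts_escape)
 *	offset  8: bts_to    (destination address, or bts_qual)
 *	offset 16: bts_flags (flags, or bts_jiffies for escape records)
 *
 * so bts_get(raw, bts_to) reads the unsigned long at raw + 8.
 */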
/*
 * Translate a BTS record from the raw format into the bts_struct format
 *
 * out (out): bts_struct interpretation
 * raw: raw BTS record
 */
static void ptrace_bts_translate_record(struct bts_struct *out, const void *raw)
{
	memset(out, 0, sizeof(*out));
	if (bts_get(raw, bts_from) == bts_escape) {
		out->qualifier = bts_get(raw, bts_qual);
		out->variant.jiffies = bts_get(raw, bts_jiffies);
	} else {
		out->qualifier = BTS_BRANCH;
		out->variant.lbr.from_ip = bts_get(raw, bts_from);
		out->variant.lbr.to_ip = bts_get(raw, bts_to);
	}
}

static int ptrace_bts_read_record(struct task_struct *child, size_t index,
				  struct bts_struct __user *out)
{
	struct bts_struct ret;
	const void *bts_record;
	size_t bts_index, bts_end;
	int error;

	error = ds_get_bts_end(child->bts, &bts_end);
	if (error < 0)
		return error;

	if (bts_end <= index)
		return -EINVAL;

	error = ds_get_bts_index(child->bts, &bts_index);
	if (error < 0)
		return error;

	/* translate the ptrace bts index into the ds bts index */
	bts_index += bts_end - (index + 1);
	if (bts_end <= bts_index)
		bts_index -= bts_end;

	error = ds_access_bts(child->bts, bts_index, &bts_record);
	if (error < 0)
		return error;

	ptrace_bts_translate_record(&ret, bts_record);

	if (copy_to_user(out, &ret, sizeof(ret)))
		return -EFAULT;

	return sizeof(ret);
}
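
/*
 * Worked example for the index translation above (hypothetical
 * numbers): with bts_end == 4 slots and ds_get_bts_index() reporting
 * bts_index == 1 (the next slot to be written), ptrace index 0, the
 * most recent record, maps to 1 + 4 - (0 + 1) == 4, which wraps back
 * to ds index 0, the slot written last.  Ptrace index 3, the oldest
 * record, maps to ds index 1.
 */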
static int ptrace_bts_drain(struct task_struct *child,
			    long size,
			    struct bts_struct __user *out)
{
	struct bts_struct ret;
	const unsigned char *raw;
	size_t end, i;
	int error;

	error = ds_get_bts_index(child->bts, &end);
	if (error < 0)
		return error;

	if (size < (end * sizeof(struct bts_struct)))
		return -EIO;

	error = ds_access_bts(child->bts, 0, (const void **)&raw);
	if (error < 0)
		return error;

	for (i = 0; i < end; i++, out++, raw += bts_cfg.sizeof_bts) {
		ptrace_bts_translate_record(&ret, raw);

		if (copy_to_user(out, &ret, sizeof(ret)))
			return -EFAULT;
	}

	error = ds_clear_bts(child->bts);
	if (error < 0)
		return error;

	return end;
}
static int ptrace_bts_config(struct task_struct *child,
			     long cfg_size,
			     const struct ptrace_bts_config __user *ucfg)
{
	struct ptrace_bts_config cfg;
	int error = 0;

	error = -EOPNOTSUPP;
	if (!bts_cfg.sizeof_bts)
		goto errout;

	error = -EIO;
	if (cfg_size < sizeof(cfg))
		goto errout;

	error = -EFAULT;
	if (copy_from_user(&cfg, ucfg, sizeof(cfg)))
		goto errout;

	error = -EINVAL;
	if ((cfg.flags & PTRACE_BTS_O_SIGNAL) &&
	    !(cfg.flags & PTRACE_BTS_O_ALLOC))
		goto errout;

	if (cfg.flags & PTRACE_BTS_O_ALLOC) {
		bts_ovfl_callback_t ovfl = NULL;
		unsigned int sig = 0;

		if (cfg.flags & PTRACE_BTS_O_SIGNAL) {
			if (!cfg.signal)
				goto errout;

			error = -EOPNOTSUPP;
			goto errout;

			sig = cfg.signal;
		}

		if (child->bts)
			(void)ds_release_bts(child->bts);

		child->bts = ds_request_bts(child, /* base = */ NULL, cfg.size,
					    ovfl, /* th = */ (size_t)-1);
		if (IS_ERR(child->bts)) {
			error = PTR_ERR(child->bts);
			child->bts = NULL;
			goto errout;
		}

		child->thread.bts_ovfl_signal = sig;
	}

	error = -EINVAL;
	if (!child->thread.ds_ctx && cfg.flags)
		goto errout;

	if (cfg.flags & PTRACE_BTS_O_TRACE)
		child->thread.debugctlmsr |= bts_cfg.debugctl_mask;
	else
		child->thread.debugctlmsr &= ~bts_cfg.debugctl_mask;

	if (cfg.flags & PTRACE_BTS_O_SCHED)
		set_tsk_thread_flag(child, TIF_BTS_TRACE_TS);
	else
		clear_tsk_thread_flag(child, TIF_BTS_TRACE_TS);

	error = sizeof(cfg);

out:
	if (child->thread.debugctlmsr)
		set_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
	else
		clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR);

	return error;

errout:
	child->thread.debugctlmsr &= ~bts_cfg.debugctl_mask;
	clear_tsk_thread_flag(child, TIF_BTS_TRACE_TS);

	goto out;
}
static int ptrace_bts_status(struct task_struct *child,
			     long cfg_size,
			     struct ptrace_bts_config __user *ucfg)
{
	struct ptrace_bts_config cfg;
	size_t end;
	const void *base, *max;
	int error;

	if (cfg_size < sizeof(cfg))
		return -EIO;

	error = ds_get_bts_end(child->bts, &end);
	if (error < 0)
		return error;

	error = ds_access_bts(child->bts, /* index = */ 0, &base);
	if (error < 0)
		return error;

	error = ds_access_bts(child->bts, /* index = */ end, &max);
	if (error < 0)
		return error;

	memset(&cfg, 0, sizeof(cfg));
	cfg.size = (max - base);
	cfg.signal = child->thread.bts_ovfl_signal;
	cfg.bts_size = sizeof(struct bts_struct);

	if (cfg.signal)
		cfg.flags |= PTRACE_BTS_O_SIGNAL;

	if (test_tsk_thread_flag(child, TIF_DEBUGCTLMSR) &&
	    child->thread.debugctlmsr & bts_cfg.debugctl_mask)
		cfg.flags |= PTRACE_BTS_O_TRACE;

	if (test_tsk_thread_flag(child, TIF_BTS_TRACE_TS))
		cfg.flags |= PTRACE_BTS_O_SCHED;

	if (copy_to_user(ucfg, &cfg, sizeof(cfg)))
		return -EFAULT;

	return sizeof(cfg);
}
static int ptrace_bts_write_record(struct task_struct *child,
				   const struct bts_struct *in)
{
	unsigned char bts_record[BTS_MAX_RECORD_SIZE];

	BUG_ON(BTS_MAX_RECORD_SIZE < bts_cfg.sizeof_bts);

	memset(bts_record, 0, bts_cfg.sizeof_bts);
	switch (in->qualifier) {
	case BTS_INVALID:
		break;

	case BTS_BRANCH:
		bts_set(bts_record, bts_from, in->variant.lbr.from_ip);
		bts_set(bts_record, bts_to, in->variant.lbr.to_ip);
		break;

	case BTS_TASK_ARRIVES:
	case BTS_TASK_DEPARTS:
		bts_set(bts_record, bts_from, bts_escape);
		bts_set(bts_record, bts_qual, in->qualifier);
		bts_set(bts_record, bts_jiffies, in->variant.jiffies);
		break;

	default:
		return -EINVAL;
	}

	return ds_write_bts(child->bts, bts_record, bts_cfg.sizeof_bts);
}

void ptrace_bts_take_timestamp(struct task_struct *tsk,
			       enum bts_qualifier qualifier)
{
	struct bts_struct rec = {
		.qualifier = qualifier,
		.variant.jiffies = jiffies_64
	};

	ptrace_bts_write_record(tsk, &rec);
}
static const struct bts_configuration bts_cfg_netburst = {
	.sizeof_bts    = sizeof(long) * 3,
	.sizeof_field  = sizeof(long),
	.debugctl_mask = (1<<2)|(1<<3)|(1<<5)
};

static const struct bts_configuration bts_cfg_pentium_m = {
	.sizeof_bts    = sizeof(long) * 3,
	.sizeof_field  = sizeof(long),
	.debugctl_mask = (1<<6)|(1<<7)
};

static const struct bts_configuration bts_cfg_core2 = {
	.sizeof_bts    = 8 * 3,
	.sizeof_field  = 8,
	.debugctl_mask = (1<<6)|(1<<7)|(1<<9)
};

static inline void bts_configure(const struct bts_configuration *cfg)
{
	bts_cfg = *cfg;
}

void __cpuinit ptrace_bts_init_intel(struct cpuinfo_x86 *c)
{
	switch (c->x86) {
	case 0x6:
		switch (c->x86_model) {
		case 0 ... 0xC:
			/* sorry, don't know about them */
			break;
		case 0xD:
		case 0xE: /* Pentium M */
			bts_configure(&bts_cfg_pentium_m);
			break;
		default: /* Core2, Atom, ... */
			bts_configure(&bts_cfg_core2);
			break;
		}
		break;
	case 0xF:
		switch (c->x86_model) {
		case 0x0:
		case 0x1:
		case 0x2: /* Netburst */
			bts_configure(&bts_cfg_netburst);
			break;
		default:
			/* sorry, don't know about them */
			break;
		}
		break;
	default:
		/* sorry, don't know about them */
		break;
	}
}
#endif /* CONFIG_X86_PTRACE_BTS */
/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure the single step bit is not set.
 */
void ptrace_disable(struct task_struct *child)
{
	user_disable_single_step(child);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif
#ifdef CONFIG_X86_PTRACE_BTS
	if (child->bts) {
		(void)ds_release_bts(child->bts);

		child->thread.debugctlmsr &= ~bts_cfg.debugctl_mask;
		if (!child->thread.debugctlmsr)
			clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR);

		clear_tsk_thread_flag(child, TIF_BTS_TRACE_TS);
	}
#endif /* CONFIG_X86_PTRACE_BTS */
}

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
static const struct user_regset_view user_x86_32_view; /* Initialized below. */
#endif
long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
	int ret;
	unsigned long __user *datap = (unsigned long __user *)data;

	switch (request) {
	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long tmp;

		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr < 0 ||
		    addr >= sizeof(struct user))
			break;

		tmp = 0;  /* Default return condition */
		if (addr < sizeof(struct user_regs_struct))
			tmp = getreg(child, addr);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			tmp = ptrace_get_debugreg(child, addr / sizeof(data));
		}
		ret = put_user(tmp, datap);
		break;
	}

	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr < 0 ||
		    addr >= sizeof(struct user))
			break;

		if (addr < sizeof(struct user_regs_struct))
			ret = putreg(child, addr, data);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			ret = ptrace_set_debugreg(child,
						  addr / sizeof(data), data);
		}
		break;

	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_GENERAL,
					   0, sizeof(struct user_regs_struct),
					   datap);

	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_GENERAL,
					     0, sizeof(struct user_regs_struct),
					     datap);

	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_FP,
					   0, sizeof(struct user_i387_struct),
					   datap);

	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_FP,
					     0, sizeof(struct user_i387_struct),
					     datap);

#ifdef CONFIG_X86_32
	case PTRACE_GETFPXREGS:	/* Get the child extended FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_XFP,
					   0, sizeof(struct user_fxsr_struct),
					   datap) ? -EIO : 0;

	case PTRACE_SETFPXREGS:	/* Set the child extended FPU state. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_XFP,
					     0, sizeof(struct user_fxsr_struct),
					     datap) ? -EIO : 0;
#endif

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	case PTRACE_GET_THREAD_AREA:
		if (addr < 0)
			return -EIO;
		ret = do_get_thread_area(child, addr,
					 (struct user_desc __user *) data);
		break;

	case PTRACE_SET_THREAD_AREA:
		if (addr < 0)
			return -EIO;
		ret = do_set_thread_area(child, addr,
					 (struct user_desc __user *) data, 0);
		break;
#endif

#ifdef CONFIG_X86_64
	/* normal 64bit interface to access TLS data.
	   Works just like arch_prctl, except that the arguments
	   are reversed. */
	case PTRACE_ARCH_PRCTL:
		ret = do_arch_prctl(child, data, addr);
		break;
#endif

	/*
	 * These bits need more cooking - not enabled yet:
	 */
#ifdef CONFIG_X86_PTRACE_BTS
	case PTRACE_BTS_CONFIG:
		ret = ptrace_bts_config
			(child, data, (struct ptrace_bts_config __user *)addr);
		break;

	case PTRACE_BTS_STATUS:
		ret = ptrace_bts_status
			(child, data, (struct ptrace_bts_config __user *)addr);
		break;

	case PTRACE_BTS_SIZE: {
		size_t size;

		ret = ds_get_bts_index(child->bts, &size);
		if (ret == 0) {
			BUG_ON(size != (int) size);
			ret = (int) size;
		}
		break;
	}

	case PTRACE_BTS_GET:
		ret = ptrace_bts_read_record
			(child, data, (struct bts_struct __user *) addr);
		break;

	case PTRACE_BTS_CLEAR:
		ret = ds_clear_bts(child->bts);
		break;

	case PTRACE_BTS_DRAIN:
		ret = ptrace_bts_drain
			(child, data, (struct bts_struct __user *) addr);
		break;
#endif /* CONFIG_X86_PTRACE_BTS */

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
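
/*
 * For illustration, a (hypothetical) 32-bit tracer built against
 * <sys/ptrace.h> and <sys/user.h> would use the USER-area interface
 * handled above roughly like this:
 *
 *	errno = 0;
 *	long ip  = ptrace(PTRACE_PEEKUSER, pid,
 *			  (void *)offsetof(struct user, regs.eip), NULL);
 *	long dr7 = ptrace(PTRACE_PEEKUSER, pid,
 *			  (void *)offsetof(struct user, u_debugreg[7]), NULL);
 *
 * Both offsets are word aligned and below sizeof(struct user), so they
 * pass the checks in PTRACE_PEEKUSR above; reads of debug registers 4
 * and 5 simply return 0.
 */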
#ifdef CONFIG_IA32_EMULATION

#include <linux/compat.h>
#include <linux/syscalls.h>
#include <asm/ia32.h>
#include <asm/user32.h>

#define R32(l,q)							\
	case offsetof(struct user32, regs.l):				\
		regs->q = value; break

#define SEG32(rs)							\
	case offsetof(struct user32, regs.rs):				\
		return set_segment_reg(child,				\
				       offsetof(struct user_regs_struct, rs), \
				       value);				\
		break

static int putreg32(struct task_struct *child, unsigned regno, u32 value)
{
	struct pt_regs *regs = task_pt_regs(child);

	switch (regno) {

	SEG32(cs);
	SEG32(ds);
	SEG32(es);
	SEG32(fs);
	SEG32(gs);
	SEG32(ss);

	R32(ebx, bx);
	R32(ecx, cx);
	R32(edx, dx);
	R32(edi, di);
	R32(esi, si);
	R32(ebp, bp);
	R32(eax, ax);
	R32(eip, ip);
	R32(esp, sp);

	case offsetof(struct user32, regs.orig_eax):
		/*
		 * Sign-extend the value so that orig_eax = -1
		 * causes (long)orig_ax < 0 tests to fire correctly.
		 */
		regs->orig_ax = (long) (s32) value;
		break;

	case offsetof(struct user32, regs.eflags):
		return set_flags(child, value);

	case offsetof(struct user32, u_debugreg[0]) ...
		offsetof(struct user32, u_debugreg[7]):
		regno -= offsetof(struct user32, u_debugreg[0]);
		return ptrace_set_debugreg(child, regno / 4, value);

	default:
		if (regno > sizeof(struct user32) || (regno & 3))
			return -EIO;

		/*
		 * Other dummy fields in the virtual user structure
		 * are ignored
		 */
		break;
	}
	return 0;
}

#undef R32
#undef SEG32
#define R32(l,q)							\
	case offsetof(struct user32, regs.l):				\
		*val = regs->q; break

#define SEG32(rs)							\
	case offsetof(struct user32, regs.rs):				\
		*val = get_segment_reg(child,				\
				       offsetof(struct user_regs_struct, rs)); \
		break

static int getreg32(struct task_struct *child, unsigned regno, u32 *val)
{
	struct pt_regs *regs = task_pt_regs(child);

	switch (regno) {

	SEG32(ds);
	SEG32(es);
	SEG32(fs);
	SEG32(gs);

	R32(cs, cs);
	R32(ss, ss);
	R32(ebx, bx);
	R32(ecx, cx);
	R32(edx, dx);
	R32(edi, di);
	R32(esi, si);
	R32(ebp, bp);
	R32(eax, ax);
	R32(orig_eax, orig_ax);
	R32(eip, ip);
	R32(esp, sp);

	case offsetof(struct user32, regs.eflags):
		*val = get_flags(child);
		break;

	case offsetof(struct user32, u_debugreg[0]) ...
		offsetof(struct user32, u_debugreg[7]):
		regno -= offsetof(struct user32, u_debugreg[0]);
		*val = ptrace_get_debugreg(child, regno / 4);
		break;

	default:
		if (regno > sizeof(struct user32) || (regno & 3))
			return -EIO;

		/*
		 * Other dummy fields in the virtual user structure
		 * are ignored
		 */
		*val = 0;
		break;
	}
	return 0;
}

#undef R32
#undef SEG32
static int genregs32_get(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 void *kbuf, void __user *ubuf)
{
	if (kbuf) {
		compat_ulong_t *k = kbuf;
		while (count > 0) {
			getreg32(target, pos, k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		compat_ulong_t __user *u = ubuf;
		while (count > 0) {
			compat_ulong_t word;
			getreg32(target, pos, &word);
			if (__put_user(word, u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	return 0;
}

static int genregs32_set(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 const void *kbuf, const void __user *ubuf)
{
	int ret = 0;
	if (kbuf) {
		const compat_ulong_t *k = kbuf;
		while (count > 0 && !ret) {
			ret = putreg32(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const compat_ulong_t __user *u = ubuf;
		while (count > 0 && !ret) {
			compat_ulong_t word;
			ret = __get_user(word, u++);
			if (ret)
				break;
			ret = putreg32(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	return ret;
}
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;
	__u32 val;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = getreg32(child, addr, &val);
		if (ret == 0)
			ret = put_user(val, (__u32 __user *)datap);
		break;

	case PTRACE_POKEUSR:
		ret = putreg32(child, addr, data);
		break;

	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_GENERAL,
					   0, sizeof(struct user_regs_struct32),
					   datap);

	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_GENERAL, 0,
					     sizeof(struct user_regs_struct32),
					     datap);

	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_FP, 0,
					   sizeof(struct user_i387_ia32_struct),
					   datap);

	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
		return copy_regset_from_user(
			child, &user_x86_32_view, REGSET_FP,
			0, sizeof(struct user_i387_ia32_struct), datap);

	case PTRACE_GETFPXREGS:	/* Get the child extended FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_XFP, 0,
					   sizeof(struct user32_fxsr_struct),
					   datap);

	case PTRACE_SETFPXREGS:	/* Set the child extended FPU state. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_XFP, 0,
					     sizeof(struct user32_fxsr_struct),
					     datap);

	case PTRACE_GET_THREAD_AREA:
	case PTRACE_SET_THREAD_AREA:
		return arch_ptrace(child, request, addr, data);

	default:
		return compat_ptrace_request(child, request, addr, data);
	}

	return ret;
}

#endif	/* CONFIG_IA32_EMULATION */
#ifdef CONFIG_X86_64

static const struct user_regset x86_64_regsets[] = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_regs_struct) / sizeof(long),
		.size = sizeof(long), .align = sizeof(long),
		.get = genregs_get, .set = genregs_set
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_i387_struct) / sizeof(long),
		.size = sizeof(long), .align = sizeof(long),
		.active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set
	},
	[REGSET_IOPERM64] = {
		.core_note_type = NT_386_IOPERM,
		.n = IO_BITMAP_LONGS,
		.size = sizeof(long), .align = sizeof(long),
		.active = ioperm_active, .get = ioperm_get
	},
};

static const struct user_regset_view user_x86_64_view = {
	.name = "x86_64", .e_machine = EM_X86_64,
	.regsets = x86_64_regsets, .n = ARRAY_SIZE(x86_64_regsets)
};

#else  /* CONFIG_X86_32 */

#define user_regs_struct32	user_regs_struct
#define genregs32_get		genregs_get
#define genregs32_set		genregs_set

#define user_i387_ia32_struct	user_i387_struct
#define user32_fxsr_struct	user_fxsr_struct

#endif	/* CONFIG_X86_64 */

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
static const struct user_regset x86_32_regsets[] = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_regs_struct32) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.get = genregs32_get, .set = genregs32_set
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_i387_ia32_struct) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = fpregs_active, .get = fpregs_get, .set = fpregs_set
	},
	[REGSET_XFP] = {
		.core_note_type = NT_PRXFPREG,
		.n = sizeof(struct user32_fxsr_struct) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_386_TLS,
		.n = GDT_ENTRY_TLS_ENTRIES, .bias = GDT_ENTRY_TLS_MIN,
		.size = sizeof(struct user_desc),
		.align = sizeof(struct user_desc),
		.active = regset_tls_active,
		.get = regset_tls_get, .set = regset_tls_set
	},
	[REGSET_IOPERM32] = {
		.core_note_type = NT_386_IOPERM,
		.n = IO_BITMAP_BYTES / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = ioperm_active, .get = ioperm_get
	},
};

static const struct user_regset_view user_x86_32_view = {
	.name = "i386", .e_machine = EM_386,
	.regsets = x86_32_regsets, .n = ARRAY_SIZE(x86_32_regsets)
};
#endif

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(task, TIF_IA32))
#endif
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
		return &user_x86_32_view;
#endif
#ifdef CONFIG_X86_64
	return &user_x86_64_view;
#endif
}
void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
		  int error_code, int si_code)
{
	struct siginfo info;

	tsk->thread.trap_no = 1;
	tsk->thread.error_code = error_code;

	memset(&info, 0, sizeof(info));
	info.si_signo = SIGTRAP;
	info.si_code = si_code;

	/* User-mode ip? */
	info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;

	/* Send us the fake SIGTRAP */
	force_sig_info(SIGTRAP, &info, tsk);
}

#ifdef CONFIG_X86_32
# define IS_IA32	1
#elif defined CONFIG_IA32_EMULATION
# define IS_IA32	test_thread_flag(TIF_IA32)
#else
# define IS_IA32	0
#endif
/*
 * We must return the syscall number to actually look up in the table.
 * This can be -1L to skip running any syscall at all.
 */
asmregparm long syscall_trace_enter(struct pt_regs *regs)
{
	long ret = 0;

	/*
	 * If we stepped into a sysenter/syscall insn, it trapped in
	 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
	 * If user-mode had set TF itself, then it's still clear from
	 * do_debug() and we need to set it again to restore the user
	 * state.  If we entered on the slow path, TF was already set.
	 */
	if (test_thread_flag(TIF_SINGLESTEP))
		regs->flags |= X86_EFLAGS_TF;

	/* do the secure computing check first */
	secure_computing(regs->orig_ax);

	if (unlikely(test_thread_flag(TIF_SYSCALL_EMU)))
		ret = -1L;

	if ((ret || test_thread_flag(TIF_SYSCALL_TRACE)) &&
	    tracehook_report_syscall_entry(regs))
		ret = -1L;

	if (unlikely(current->audit_context)) {
		if (IS_IA32)
			audit_syscall_entry(AUDIT_ARCH_I386,
					    regs->orig_ax,
					    regs->bx, regs->cx,
					    regs->dx, regs->si);
#ifdef CONFIG_X86_64
		else
			audit_syscall_entry(AUDIT_ARCH_X86_64,
					    regs->orig_ax,
					    regs->di, regs->si,
					    regs->dx, regs->r10);
#endif
	}

	return ret ?: regs->orig_ax;
}
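
/*
 * For illustration: under PTRACE_SYSEMU the tracee has TIF_SYSCALL_EMU
 * set, so ret becomes -1L above and -1L is returned; the syscall
 * dispatch code then runs no system call at all, leaving the tracer to
 * emulate it.  In the ordinary PTRACE_SYSCALL case ret stays 0 and the
 * (possibly tracer-modified) regs->orig_ax is returned as the syscall
 * number to look up.
 */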
asmregparm void syscall_trace_leave(struct pt_regs *regs)
{
	if (unlikely(current->audit_context))
		audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, 0);

	/*
	 * If TIF_SYSCALL_EMU is set, we only get here because of
	 * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
	 * We already reported this syscall instruction in
	 * syscall_trace_enter(), so don't do any more now.
	 */
	if (unlikely(test_thread_flag(TIF_SYSCALL_EMU)))
		return;

	/*
	 * If we are single-stepping, synthesize a trap to follow the
	 * system call instruction.
	 */
	if (test_thread_flag(TIF_SINGLESTEP) &&
	    tracehook_consider_fatal_signal(current, SIGTRAP, SIG_DFL))
		send_sigtrap(current, regs, 0, TRAP_BRKPT);
}