/* By Ross Biro 1/23/92 */
/*
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * BTS tracing
 *	Markus Metzger <markus.t.metzger@intel.com>, Dec 2007
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/debugreg.h>
#include <asm/ldt.h>
#include <asm/desc.h>
#include <asm/prctl.h>
#include <asm/proto.h>
#include <asm/ds.h>

/*
 * Does not yet catch signals sent when the child dies;
 * those are handled in exit.c or in signal.c.
 */

/*
 * Determines which flags the user has access to [1 = access, 0 = no access].
 */
#define FLAG_MASK_32		((unsigned long)			\
				 (X86_EFLAGS_CF | X86_EFLAGS_PF |	\
				  X86_EFLAGS_AF | X86_EFLAGS_ZF |	\
				  X86_EFLAGS_SF | X86_EFLAGS_TF |	\
				  X86_EFLAGS_DF | X86_EFLAGS_OF |	\
				  X86_EFLAGS_RF | X86_EFLAGS_AC))

/*
 * Determines whether a value may be installed in a segment register.
 */
static inline bool invalid_selector(u16 value)
{
	return unlikely(value != 0 && (value & SEGMENT_RPL_MASK) != USER_RPL);
}
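
/*
 * Illustrative examples (not part of the original source): selectors
 * 0x0000 and 0x0033 (GDT index 6, RPL 3) pass the check above, while
 * 0x0010 (RPL 0) is rejected; a ptracer may only install null or
 * user-privilege selectors.
 */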

#ifdef CONFIG_X86_32

#define FLAG_MASK		FLAG_MASK_32

static long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
{
	BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
	regno >>= 2;
	if (regno > FS)
		--regno;
	return &regs->bx + regno;
}

static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
	/*
	 * Returning the value truncates it to 16 bits.
	 */
	unsigned int retval;
	if (offset != offsetof(struct user_regs_struct, gs))
		retval = *pt_regs_access(task_pt_regs(task), offset);
	else {
		retval = task->thread.gs;
		if (task == current)
			savesegment(gs, retval);
	}
	return retval;
}

static int set_segment_reg(struct task_struct *task,
			   unsigned long offset, u16 value)
{
	/*
	 * The value argument was already truncated to 16 bits.
	 */
	if (invalid_selector(value))
		return -EIO;

	if (offset != offsetof(struct user_regs_struct, gs))
		*pt_regs_access(task_pt_regs(task), offset) = value;
	else {
		task->thread.gs = value;
		if (task == current)
			/*
			 * The user-mode %gs is not affected by
			 * kernel entry, so we must update the CPU.
			 */
			loadsegment(gs, value);
	}

	return 0;
}

static unsigned long debugreg_addr_limit(struct task_struct *task)
{
	return TASK_SIZE - 3;
}

#else /* CONFIG_X86_64 */

#define FLAG_MASK		(FLAG_MASK_32 | X86_EFLAGS_NT)

static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long offset)
{
	BUILD_BUG_ON(offsetof(struct pt_regs, r15) != 0);
	return &regs->r15 + (offset / sizeof(regs->r15));
}

static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
	/*
	 * Returning the value truncates it to 16 bits.
	 */
	unsigned int seg;

	switch (offset) {
	case offsetof(struct user_regs_struct, fs):
		if (task == current) {
			/* Older gas can't assemble movq %?s,%r?? */
			asm("movl %%fs,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.fsindex;
	case offsetof(struct user_regs_struct, gs):
		if (task == current) {
			asm("movl %%gs,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.gsindex;
	case offsetof(struct user_regs_struct, ds):
		if (task == current) {
			asm("movl %%ds,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.ds;
	case offsetof(struct user_regs_struct, es):
		if (task == current) {
			asm("movl %%es,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.es;

	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ss):
		break;
	}
	return *pt_regs_access(task_pt_regs(task), offset);
}

static int set_segment_reg(struct task_struct *task,
			   unsigned long offset, u16 value)
{
	/*
	 * The value argument was already truncated to 16 bits.
	 */
	if (invalid_selector(value))
		return -EIO;

	switch (offset) {
	case offsetof(struct user_regs_struct, fs):
		/*
		 * If this is setting fs as for normal 64-bit use but
		 * setting fs_base has implicitly changed it, leave it.
		 */
		if ((value == FS_TLS_SEL && task->thread.fsindex == 0 &&
		     task->thread.fs != 0) ||
		    (value == 0 && task->thread.fsindex == FS_TLS_SEL &&
		     task->thread.fs == 0))
			break;
		task->thread.fsindex = value;
		if (task == current)
			loadsegment(fs, task->thread.fsindex);
		break;
	case offsetof(struct user_regs_struct, gs):
		/*
		 * If this is setting gs as for normal 64-bit use but
		 * setting gs_base has implicitly changed it, leave it.
		 */
		if ((value == GS_TLS_SEL && task->thread.gsindex == 0 &&
		     task->thread.gs != 0) ||
		    (value == 0 && task->thread.gsindex == GS_TLS_SEL &&
		     task->thread.gs == 0))
			break;
		task->thread.gsindex = value;
		if (task == current)
			load_gs_index(task->thread.gsindex);
		break;
	case offsetof(struct user_regs_struct, ds):
		task->thread.ds = value;
		if (task == current)
			loadsegment(ds, task->thread.ds);
		break;
	case offsetof(struct user_regs_struct, es):
		task->thread.es = value;
		if (task == current)
			loadsegment(es, task->thread.es);
		break;

		/*
		 * Can't actually change these in 64-bit mode.
		 */
	case offsetof(struct user_regs_struct, cs):
#ifdef CONFIG_IA32_EMULATION
		if (test_tsk_thread_flag(task, TIF_IA32))
			task_pt_regs(task)->cs = value;
#endif
		break;
	case offsetof(struct user_regs_struct, ss):
#ifdef CONFIG_IA32_EMULATION
		if (test_tsk_thread_flag(task, TIF_IA32))
			task_pt_regs(task)->ss = value;
#endif
		break;
	}

	return 0;
}

static unsigned long debugreg_addr_limit(struct task_struct *task)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(task, TIF_IA32))
		return IA32_PAGE_OFFSET - 3;
#endif
	return TASK_SIZE64 - 7;
}

#endif	/* CONFIG_X86_32 */
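
/*
 * The "- 3" and "- 7" above keep a maximal 4-byte (32-bit) or 8-byte
 * (64-bit) watchpoint that starts at the last permitted address
 * entirely within the user address range.
 */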

static unsigned long get_flags(struct task_struct *task)
{
	unsigned long retval = task_pt_regs(task)->flags;

	/*
	 * If the debugger set TF, hide it from the readout.
	 */
	if (test_tsk_thread_flag(task, TIF_FORCED_TF))
		retval &= ~X86_EFLAGS_TF;

	return retval;
}

static int set_flags(struct task_struct *task, unsigned long value)
{
	struct pt_regs *regs = task_pt_regs(task);

	/*
	 * If the user value contains TF, mark that
	 * it was not "us" (the debugger) that set it.
	 * If not, make sure it stays set if we set it ourselves.
	 */
	if (value & X86_EFLAGS_TF)
		clear_tsk_thread_flag(task, TIF_FORCED_TF);
	else if (test_tsk_thread_flag(task, TIF_FORCED_TF))
		value |= X86_EFLAGS_TF;

	regs->flags = (regs->flags & ~FLAG_MASK) | (value & FLAG_MASK);

	return 0;
}
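
/*
 * Example: while single-stepping, the kernel sets TF on behalf of the
 * debugger and marks it with TIF_FORCED_TF. A debugger that then
 * writes flags without TF does not clear the real TF bit, and
 * get_flags() above keeps hiding the forced TF from the readout.
 */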

static int putreg(struct task_struct *child,
		  unsigned long offset, unsigned long value)
{
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ds):
	case offsetof(struct user_regs_struct, es):
	case offsetof(struct user_regs_struct, fs):
	case offsetof(struct user_regs_struct, gs):
	case offsetof(struct user_regs_struct, ss):
		return set_segment_reg(child, offset, value);

	case offsetof(struct user_regs_struct, flags):
		return set_flags(child, value);

#ifdef CONFIG_X86_64
	case offsetof(struct user_regs_struct, fs_base):
		if (value >= TASK_SIZE_OF(child))
			return -EIO;
		/*
		 * When changing the segment base, use do_arch_prctl
		 * to set either thread.fs or thread.fsindex and the
		 * corresponding GDT slot.
		 */
		if (child->thread.fs != value)
			return do_arch_prctl(child, ARCH_SET_FS, value);
		return 0;
	case offsetof(struct user_regs_struct, gs_base):
		/*
		 * Exactly the same here as the %fs handling above.
		 */
		if (value >= TASK_SIZE_OF(child))
			return -EIO;
		if (child->thread.gs != value)
			return do_arch_prctl(child, ARCH_SET_GS, value);
		return 0;
#endif
	}

	*pt_regs_access(task_pt_regs(child), offset) = value;
	return 0;
}

static unsigned long getreg(struct task_struct *task, unsigned long offset)
{
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ds):
	case offsetof(struct user_regs_struct, es):
	case offsetof(struct user_regs_struct, fs):
	case offsetof(struct user_regs_struct, gs):
	case offsetof(struct user_regs_struct, ss):
		return get_segment_reg(task, offset);

	case offsetof(struct user_regs_struct, flags):
		return get_flags(task);

#ifdef CONFIG_X86_64
	case offsetof(struct user_regs_struct, fs_base): {
		/*
		 * do_arch_prctl may have used a GDT slot instead of
		 * the MSR. To userland, it appears the same either
		 * way, except the %fs segment selector might not be 0.
		 */
		unsigned int seg = task->thread.fsindex;
		if (task->thread.fs != 0)
			return task->thread.fs;
		if (task == current)
			asm("movl %%fs,%0" : "=r" (seg));
		if (seg != FS_TLS_SEL)
			return 0;
		return get_desc_base(&task->thread.tls_array[FS_TLS]);
	}
	case offsetof(struct user_regs_struct, gs_base): {
		/*
		 * Exactly the same here as the %fs handling above.
		 */
		unsigned int seg = task->thread.gsindex;
		if (task->thread.gs != 0)
			return task->thread.gs;
		if (task == current)
			asm("movl %%gs,%0" : "=r" (seg));
		if (seg != GS_TLS_SEL)
			return 0;
		return get_desc_base(&task->thread.tls_array[GS_TLS]);
	}
#endif
	}

	return *pt_regs_access(task_pt_regs(task), offset);
}

static int genregs_get(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       void *kbuf, void __user *ubuf)
{
	if (kbuf) {
		unsigned long *k = kbuf;
		while (count > 0) {
			*k++ = getreg(target, pos);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		unsigned long __user *u = ubuf;
		while (count > 0) {
			if (__put_user(getreg(target, pos), u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	return 0;
}

static int genregs_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	int ret = 0;

	if (kbuf) {
		const unsigned long *k = kbuf;
		while (count > 0 && !ret) {
			ret = putreg(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const unsigned long __user *u = ubuf;
		while (count > 0 && !ret) {
			unsigned long word;
			ret = __get_user(word, u++);
			if (ret)
				break;
			ret = putreg(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	return ret;
}

/*
 * This function is trivial and will be inlined by the compiler.
 * Having it separates the implementation details of debug
 * registers from the interface details of ptrace.
 */
static unsigned long ptrace_get_debugreg(struct task_struct *child, int n)
{
	switch (n) {
	case 0:		return child->thread.debugreg0;
	case 1:		return child->thread.debugreg1;
	case 2:		return child->thread.debugreg2;
	case 3:		return child->thread.debugreg3;
	case 6:		return child->thread.debugreg6;
	case 7:		return child->thread.debugreg7;
	}
	return 0;
}

static int ptrace_set_debugreg(struct task_struct *child,
			       int n, unsigned long data)
{
	int i;

	if (unlikely(n == 4 || n == 5))
		return -EIO;

	if (n < 4 && unlikely(data >= debugreg_addr_limit(child)))
		return -EIO;

	switch (n) {
	case 0:		child->thread.debugreg0 = data; break;
	case 1:		child->thread.debugreg1 = data; break;
	case 2:		child->thread.debugreg2 = data; break;
	case 3:		child->thread.debugreg3 = data; break;

	case 6:
		if ((data & ~0xffffffffUL) != 0)
			return -EIO;
		child->thread.debugreg6 = data;
		break;

	case 7:
		/*
		 * Sanity-check data. Take one half-byte at once with
		 * check = (val >> (16 + 4*i)) & 0xf. It contains the
		 * R/Wi and LENi bits; bits 0 and 1 are R/Wi, and bits
		 * 2 and 3 are LENi. Given a list of invalid values,
		 * we do mask |= 1 << invalid_value, so that
		 * (mask >> check) & 1 is a correct test for invalid
		 * values.
		 *
		 * R/Wi contains the type of the breakpoint /
		 * watchpoint, LENi contains the length of the watched
		 * data in the watchpoint case.
		 *
		 * The invalid values are:
		 * - LENi == 0x10 (undefined), so mask |= 0x0f00.	[32-bit]
		 * - R/Wi == 0x10 (break on I/O reads or writes), so
		 *   mask |= 0x4444.
		 * - R/Wi == 0x00 && LENi != 0x00, so we have mask |=
		 *   0x1110.
		 *
		 * Finally, mask = 0x0f00 | 0x4444 | 0x1110 == 0x5f54.
		 *
		 * See the Intel Manual "System Programming Guide",
		 * 15.2.4
		 *
		 * Note that LENi == 0x10 is defined on x86_64 in long
		 * mode (i.e. even for 32-bit userspace software, but
		 * 64-bit kernel), so the x86_64 mask value is 0x5554.
		 * See the AMD manual no. 24593 (AMD64 System Programming)
		 */
#ifdef CONFIG_X86_32
#define DR7_MASK	0x5f54
#else
#define DR7_MASK	0x5554
#endif
		data &= ~DR_CONTROL_RESERVED;
		for (i = 0; i < 4; i++)
			if ((DR7_MASK >> ((data >> (16 + 4*i)) & 0xf)) & 1)
				return -EIO;
		child->thread.debugreg7 = data;
		if (data)
			set_tsk_thread_flag(child, TIF_DEBUG);
		else
			clear_tsk_thread_flag(child, TIF_DEBUG);
		break;
	}

	return 0;
}
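
/*
 * A worked example of the DR7 check above (illustrative, not from the
 * original source): a 4-byte write watchpoint in DR0 sets L0 (bit 0),
 * R/W0 = 01b (bits 16-17) and LEN0 = 11b (bits 18-19), i.e.
 * data == 0x000d0001. The half-byte for i == 0 is 0xd == 1101b, and
 * (DR7_MASK >> 0xd) & 1 == 0 on both masks, so the value is accepted.
 */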

static int ptrace_bts_get_size(struct task_struct *child)
{
	if (!child->thread.ds_area_msr)
		return -ENXIO;

	return ds_get_bts_index((void *)child->thread.ds_area_msr);
}

static int ptrace_bts_read_record(struct task_struct *child,
				  long index,
				  struct bts_struct __user *out)
{
	struct bts_struct ret;
	int retval;
	int bts_end;
	int bts_index;

	if (!child->thread.ds_area_msr)
		return -ENXIO;

	if (index < 0)
		return -EINVAL;

	bts_end = ds_get_bts_end((void *)child->thread.ds_area_msr);
	if (bts_end <= index)
		return -EINVAL;

	/* translate the ptrace bts index into the ds bts index */
	bts_index = ds_get_bts_index((void *)child->thread.ds_area_msr);
	bts_index -= (index + 1);
	if (bts_index < 0)
		bts_index += bts_end;

	retval = ds_read_bts((void *)child->thread.ds_area_msr,
			     bts_index, &ret);
	if (retval)
		return retval;

	if (copy_to_user(out, &ret, sizeof(ret)))
		return -EFAULT;

	return sizeof(ret);
}
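
/*
 * In the translation above, ptrace index 0 names the most recently
 * written BTS record; higher indices count backwards in time,
 * wrapping around the circular buffer of bts_end records.
 */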

static int ptrace_bts_write_record(struct task_struct *child,
				   const struct bts_struct *in)
{
	int retval;

	if (!child->thread.ds_area_msr)
		return -ENXIO;

	retval = ds_write_bts((void *)child->thread.ds_area_msr, in);
	if (retval)
		return retval;

	return sizeof(*in);
}

static int ptrace_bts_clear(struct task_struct *child)
{
	if (!child->thread.ds_area_msr)
		return -ENXIO;

	return ds_clear((void *)child->thread.ds_area_msr);
}

static int ptrace_bts_drain(struct task_struct *child,
			    struct bts_struct __user *out)
{
	int end, i;
	void *ds = (void *)child->thread.ds_area_msr;

	if (!ds)
		return -ENXIO;

	end = ds_get_bts_index(ds);
	if (end <= 0)
		return end;

	for (i = 0; i < end; i++, out++) {
		struct bts_struct ret;
		int retval;

		retval = ds_read_bts(ds, i, &ret);
		if (retval < 0)
			return retval;

		if (copy_to_user(out, &ret, sizeof(ret)))
			return -EFAULT;
	}

	ds_clear(ds);

	return i;
}

static int ptrace_bts_config(struct task_struct *child,
			     const struct ptrace_bts_config __user *ucfg)
{
	struct ptrace_bts_config cfg;
	unsigned long debugctl_mask;
	int bts_size, ret;
	void *ds;

	if (copy_from_user(&cfg, ucfg, sizeof(cfg)))
		return -EFAULT;

	bts_size = 0;
	ds = (void *)child->thread.ds_area_msr;
	if (ds) {
		bts_size = ds_get_bts_size(ds);
		if (bts_size < 0)
			return bts_size;
	}

	if (bts_size != cfg.size) {
		ret = ds_free((void **)&child->thread.ds_area_msr);
		if (ret < 0)
			return ret;

		if (cfg.size > 0)
			ret = ds_allocate((void **)&child->thread.ds_area_msr,
					  cfg.size);
		ds = (void *)child->thread.ds_area_msr;
		if (ds)
			set_tsk_thread_flag(child, TIF_DS_AREA_MSR);
		else
			clear_tsk_thread_flag(child, TIF_DS_AREA_MSR);

		if (ret < 0)
			return ret;

		bts_size = ds_get_bts_size(ds);
		if (bts_size <= 0)
			return bts_size;
	}

	if (ds) {
		if (cfg.flags & PTRACE_BTS_O_SIGNAL) {
			ret = ds_set_overflow(ds, DS_O_SIGNAL);
		} else {
			ret = ds_set_overflow(ds, DS_O_WRAP);
		}
		if (ret < 0)
			return ret;
	}

	debugctl_mask = ds_debugctl_mask();
	if (ds && (cfg.flags & PTRACE_BTS_O_TRACE)) {
		child->thread.debugctlmsr |= debugctl_mask;
		set_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
	} else {
		/*
		 * There is no way for us to check whether we 'own'
		 * the respective bits in the DEBUGCTL MSR that we
		 * are about to clear.
		 */
		child->thread.debugctlmsr &= ~debugctl_mask;
		if (!child->thread.debugctlmsr)
			clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
	}

	if (ds && (cfg.flags & PTRACE_BTS_O_SCHED))
		set_tsk_thread_flag(child, TIF_BTS_TRACE_TS);
	else
		clear_tsk_thread_flag(child, TIF_BTS_TRACE_TS);

	return 0;
}
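
/*
 * Hypothetical tracer-side sketch of configuring BTS for a stopped
 * child. The config pointer travels in the addr slot, as arch_ptrace()
 * below dispatches it; the data argument is unused here.
 *
 *	struct ptrace_bts_config cfg = {
 *		.size	= 4096,
 *		.flags	= PTRACE_BTS_O_TRACE | PTRACE_BTS_O_SCHED,
 *	};
 *	ptrace(PTRACE_BTS_CONFIG, pid, &cfg, 0);
 */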

static int ptrace_bts_status(struct task_struct *child,
			     struct ptrace_bts_config __user *ucfg)
{
	void *ds = (void *)child->thread.ds_area_msr;
	struct ptrace_bts_config cfg;

	memset(&cfg, 0, sizeof(cfg));

	if (ds) {
		cfg.size = ds_get_bts_size(ds);

		if (ds_get_overflow(ds) == DS_O_SIGNAL)
			cfg.flags |= PTRACE_BTS_O_SIGNAL;

		if (test_tsk_thread_flag(child, TIF_DEBUGCTLMSR) &&
		    child->thread.debugctlmsr & ds_debugctl_mask())
			cfg.flags |= PTRACE_BTS_O_TRACE;

		if (test_tsk_thread_flag(child, TIF_BTS_TRACE_TS))
			cfg.flags |= PTRACE_BTS_O_SCHED;
	}

	if (copy_to_user(ucfg, &cfg, sizeof(cfg)))
		return -EFAULT;

	return sizeof(cfg);
}

void ptrace_bts_take_timestamp(struct task_struct *tsk,
			       enum bts_qualifier qualifier)
{
	struct bts_struct rec = {
		.qualifier = qualifier,
		.variant.jiffies = jiffies
	};

	ptrace_bts_write_record(tsk, &rec);
}

/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure the single step bit is not set.
 */
void ptrace_disable(struct task_struct *child)
{
	user_disable_single_step(child);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif
	ptrace_bts_config(child, /* options = */ 0);
	if (child->thread.ds_area_msr) {
		ds_free((void **)&child->thread.ds_area_msr);
		clear_tsk_thread_flag(child, TIF_DS_AREA_MSR);
	}
}

long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
	int i, ret;
	unsigned long __user *datap = (unsigned long __user *)data;

	switch (request) {
	/* when I and D space are separate, these will need to be fixed. */
	case PTRACE_PEEKTEXT: /* read word at location addr. */
	case PTRACE_PEEKDATA:
		ret = generic_ptrace_peekdata(child, addr, data);
		break;

	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long tmp;

		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr < 0 ||
		    addr >= sizeof(struct user))
			break;

		tmp = 0;  /* Default return condition */
		if (addr < sizeof(struct user_regs_struct))
			tmp = getreg(child, addr);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			tmp = ptrace_get_debugreg(child, addr / sizeof(data));
		}
		ret = put_user(tmp, datap);
		break;
	}

	/* when I and D space are separate, this will have to be fixed. */
	case PTRACE_POKETEXT: /* write the word at location addr. */
	case PTRACE_POKEDATA:
		ret = generic_ptrace_pokedata(child, addr, data);
		break;

	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr < 0 ||
		    addr >= sizeof(struct user))
			break;

		if (addr < sizeof(struct user_regs_struct))
			ret = putreg(child, addr, data);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			ret = ptrace_set_debugreg(child,
						  addr / sizeof(data), data);
		}
		break;

	case PTRACE_GETREGS: { /* Get all gp regs from the child. */
		if (!access_ok(VERIFY_WRITE, datap,
			       sizeof(struct user_regs_struct))) {
			ret = -EIO;
			break;
		}
		for (i = 0; i < sizeof(struct user_regs_struct); i += sizeof(long)) {
			__put_user(getreg(child, i), datap);
			datap++;
		}
		ret = 0;
		break;
	}

	case PTRACE_SETREGS: { /* Set all gp regs in the child. */
		unsigned long tmp;

		if (!access_ok(VERIFY_READ, datap,
			       sizeof(struct user_regs_struct))) {
			ret = -EIO;
			break;
		}
		for (i = 0; i < sizeof(struct user_regs_struct); i += sizeof(long)) {
			__get_user(tmp, datap);
			putreg(child, i, tmp);
			datap++;
		}
		ret = 0;
		break;
	}

	case PTRACE_GETFPREGS: { /* Get the child FPU state. */
		if (!access_ok(VERIFY_WRITE, datap,
			       sizeof(struct user_i387_struct))) {
			ret = -EIO;
			break;
		}
		ret = 0;
		if (!tsk_used_math(child))
			init_fpu(child);
		get_fpregs((struct user_i387_struct __user *)data, child);
		break;
	}

	case PTRACE_SETFPREGS: { /* Set the child FPU state. */
		if (!access_ok(VERIFY_READ, datap,
			       sizeof(struct user_i387_struct))) {
			ret = -EIO;
			break;
		}
		set_stopped_child_used_math(child);
		set_fpregs(child, (struct user_i387_struct __user *)data);
		ret = 0;
		break;
	}

#ifdef CONFIG_X86_32
	case PTRACE_GETFPXREGS: { /* Get the child extended FPU state. */
		if (!access_ok(VERIFY_WRITE, datap,
			       sizeof(struct user_fxsr_struct))) {
			ret = -EIO;
			break;
		}
		if (!tsk_used_math(child))
			init_fpu(child);
		ret = get_fpxregs((struct user_fxsr_struct __user *)data, child);
		break;
	}

	case PTRACE_SETFPXREGS: { /* Set the child extended FPU state. */
		if (!access_ok(VERIFY_READ, datap,
			       sizeof(struct user_fxsr_struct))) {
			ret = -EIO;
			break;
		}
		set_stopped_child_used_math(child);
		ret = set_fpxregs(child, (struct user_fxsr_struct __user *)data);
		break;
	}
#endif

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	case PTRACE_GET_THREAD_AREA:
		if (addr < 0)
			return -EIO;
		ret = do_get_thread_area(child, addr,
					 (struct user_desc __user *)data);
		break;

	case PTRACE_SET_THREAD_AREA:
		if (addr < 0)
			return -EIO;
		ret = do_set_thread_area(child, addr,
					 (struct user_desc __user *)data, 0);
		break;
#endif

#ifdef CONFIG_X86_64
	/* normal 64bit interface to access TLS data.
	   Works just like arch_prctl, except that the arguments
	   are reversed. */
	case PTRACE_ARCH_PRCTL:
		ret = do_arch_prctl(child, data, addr);
		break;
#endif

	case PTRACE_BTS_CONFIG:
		ret = ptrace_bts_config
			(child, (struct ptrace_bts_config __user *)addr);
		break;

	case PTRACE_BTS_STATUS:
		ret = ptrace_bts_status
			(child, (struct ptrace_bts_config __user *)addr);
		break;

	case PTRACE_BTS_SIZE:
		ret = ptrace_bts_get_size(child);
		break;

	case PTRACE_BTS_GET:
		ret = ptrace_bts_read_record
			(child, data, (struct bts_struct __user *)addr);
		break;

	case PTRACE_BTS_CLEAR:
		ret = ptrace_bts_clear(child);
		break;

	case PTRACE_BTS_DRAIN:
		ret = ptrace_bts_drain
			(child, (struct bts_struct __user *)addr);
		break;

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
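
/*
 * Hypothetical tracer-side example of the USER-area path above:
 * reading DR6 of a stopped child (PTRACE_PEEKUSER is the userspace
 * spelling of PTRACE_PEEKUSR).
 *
 *	errno = 0;
 *	long dr6 = ptrace(PTRACE_PEEKUSER, pid,
 *			  offsetof(struct user, u_debugreg[6]), 0);
 */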

#ifdef CONFIG_IA32_EMULATION

#include <linux/compat.h>
#include <linux/syscalls.h>
#include <asm/ia32.h>
#include <asm/user32.h>

#define R32(l,q)							\
	case offsetof(struct user32, regs.l):				\
		regs->q = value; break

#define SEG32(rs)							\
	case offsetof(struct user32, regs.rs):				\
		return set_segment_reg(child,				\
				       offsetof(struct user_regs_struct, rs), \
				       value);				\
		break

static int putreg32(struct task_struct *child, unsigned regno, u32 value)
{
	struct pt_regs *regs = task_pt_regs(child);

	switch (regno) {

	SEG32(cs);
	SEG32(ds);
	SEG32(es);
	SEG32(fs);
	SEG32(gs);
	SEG32(ss);

	R32(ebx, bx);
	R32(ecx, cx);
	R32(edx, dx);
	R32(edi, di);
	R32(esi, si);
	R32(ebp, bp);
	R32(eax, ax);
	R32(orig_eax, orig_ax);
	R32(eip, ip);
	R32(esp, sp);

	case offsetof(struct user32, regs.eflags):
		return set_flags(child, value);

	case offsetof(struct user32, u_debugreg[0]) ...
	     offsetof(struct user32, u_debugreg[7]):
		regno -= offsetof(struct user32, u_debugreg[0]);
		return ptrace_set_debugreg(child, regno / 4, value);

	default:
		if (regno > sizeof(struct user32) || (regno & 3))
			return -EIO;

		/*
		 * Other dummy fields in the virtual user structure
		 * are ignored
		 */
		break;
	}
	return 0;
}

#undef R32
#undef SEG32

#define R32(l,q)							\
	case offsetof(struct user32, regs.l):				\
		*val = regs->q; break

#define SEG32(rs)							\
	case offsetof(struct user32, regs.rs):				\
		*val = get_segment_reg(child,				\
				       offsetof(struct user_regs_struct, rs)); \
		break

static int getreg32(struct task_struct *child, unsigned regno, u32 *val)
{
	struct pt_regs *regs = task_pt_regs(child);

	switch (regno) {

	SEG32(ds);
	SEG32(es);
	SEG32(fs);
	SEG32(gs);

	R32(cs, cs);
	R32(ss, ss);
	R32(ebx, bx);
	R32(ecx, cx);
	R32(edx, dx);
	R32(edi, di);
	R32(esi, si);
	R32(ebp, bp);
	R32(eax, ax);
	R32(orig_eax, orig_ax);
	R32(eip, ip);
	R32(esp, sp);

	case offsetof(struct user32, regs.eflags):
		*val = get_flags(child);
		break;

	case offsetof(struct user32, u_debugreg[0]) ...
	     offsetof(struct user32, u_debugreg[7]):
		regno -= offsetof(struct user32, u_debugreg[0]);
		*val = ptrace_get_debugreg(child, regno / 4);
		break;

	default:
		if (regno > sizeof(struct user32) || (regno & 3))
			return -EIO;

		/*
		 * Other dummy fields in the virtual user structure
		 * are ignored
		 */
		*val = 0;
		break;
	}
	return 0;
}

#undef R32
#undef SEG32

static int genregs32_get(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 void *kbuf, void __user *ubuf)
{
	if (kbuf) {
		compat_ulong_t *k = kbuf;
		while (count > 0) {
			getreg32(target, pos, k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		compat_ulong_t __user *u = ubuf;
		while (count > 0) {
			compat_ulong_t word;
			getreg32(target, pos, &word);
			if (__put_user(word, u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	return 0;
}

static int genregs32_set(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 const void *kbuf, const void __user *ubuf)
{
	int ret = 0;

	if (kbuf) {
		const compat_ulong_t *k = kbuf;
		while (count > 0 && !ret) {
			/* pos is a user32 offset, so use the compat helper */
			ret = putreg32(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const compat_ulong_t __user *u = ubuf;
		while (count > 0 && !ret) {
			compat_ulong_t word;
			ret = __get_user(word, u++);
			if (ret)
				break;
			ret = putreg32(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	return ret;
}

static long ptrace32_siginfo(unsigned request, u32 pid, u32 addr, u32 data)
{
	siginfo_t __user *si = compat_alloc_user_space(sizeof(siginfo_t));
	compat_siginfo_t __user *si32 = compat_ptr(data);
	siginfo_t ssi;
	int ret;

	if (request == PTRACE_SETSIGINFO) {
		memset(&ssi, 0, sizeof(siginfo_t));
		ret = copy_siginfo_from_user32(&ssi, si32);
		if (ret)
			return ret;
		if (copy_to_user(si, &ssi, sizeof(siginfo_t)))
			return -EFAULT;
	}
	ret = sys_ptrace(request, pid, addr, (unsigned long)si);
	if (ret)
		return ret;
	if (request == PTRACE_GETSIGINFO) {
		if (copy_from_user(&ssi, si, sizeof(siginfo_t)))
			return -EFAULT;
		ret = copy_siginfo_to_user32(si32, &ssi);
	}
	return ret;
}
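
/*
 * The helper above bounces siginfo through a native-sized buffer
 * allocated with compat_alloc_user_space(): sys_ptrace() only
 * understands the native siginfo_t layout, so the compat layout is
 * converted on either side of the call.
 */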

asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
{
	struct task_struct *child;
	struct pt_regs *childregs;
	void __user *datap = compat_ptr(data);
	int ret;
	__u32 val;

	switch (request) {
	case PTRACE_TRACEME:
	case PTRACE_ATTACH:
	case PTRACE_KILL:
	case PTRACE_CONT:
	case PTRACE_SINGLESTEP:
	case PTRACE_SINGLEBLOCK:
	case PTRACE_DETACH:
	case PTRACE_SYSCALL:
	case PTRACE_OLDSETOPTIONS:
	case PTRACE_SETOPTIONS:
	case PTRACE_SET_THREAD_AREA:
	case PTRACE_GET_THREAD_AREA:
	case PTRACE_BTS_CONFIG:
	case PTRACE_BTS_STATUS:
	case PTRACE_BTS_SIZE:
	case PTRACE_BTS_GET:
	case PTRACE_BTS_CLEAR:
	case PTRACE_BTS_DRAIN:
		return sys_ptrace(request, pid, addr, data);

	default:
		return -EINVAL;

	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
	case PTRACE_POKEDATA:
	case PTRACE_POKETEXT:
	case PTRACE_POKEUSR:
	case PTRACE_PEEKUSR:
	case PTRACE_GETREGS:
	case PTRACE_SETREGS:
	case PTRACE_SETFPREGS:
	case PTRACE_GETFPREGS:
	case PTRACE_SETFPXREGS:
	case PTRACE_GETFPXREGS:
	case PTRACE_GETEVENTMSG:
		break;

	case PTRACE_SETSIGINFO:
	case PTRACE_GETSIGINFO:
		return ptrace32_siginfo(request, pid, addr, data);
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child))
		return PTR_ERR(child);

	ret = ptrace_check_attach(child, request == PTRACE_KILL);
	if (ret < 0)
		goto out;

	childregs = task_pt_regs(child);

	switch (request) {
	case PTRACE_PEEKDATA:
	case PTRACE_PEEKTEXT:
		ret = 0;
		if (access_process_vm(child, addr, &val, sizeof(u32), 0) !=
		    sizeof(u32))
			ret = -EIO;
		else
			ret = put_user(val, (unsigned int __user *)datap);
		break;

	case PTRACE_POKEDATA:
	case PTRACE_POKETEXT:
		ret = 0;
		if (access_process_vm(child, addr, &data, sizeof(u32), 1) !=
		    sizeof(u32))
			ret = -EIO;
		break;

	case PTRACE_PEEKUSR:
		ret = getreg32(child, addr, &val);
		if (ret == 0)
			ret = put_user(val, (__u32 __user *)datap);
		break;

	case PTRACE_POKEUSR:
		ret = putreg32(child, addr, data);
		break;

	case PTRACE_GETREGS: { /* Get all gp regs from the child. */
		int i;

		if (!access_ok(VERIFY_WRITE, datap, 16*4)) {
			ret = -EIO;
			break;
		}
		ret = 0;
		for (i = 0; i < sizeof(struct user_regs_struct32); i += sizeof(__u32)) {
			getreg32(child, i, &val);
			ret |= __put_user(val, (u32 __user *)datap);
			datap += sizeof(u32);
		}
		break;
	}

	case PTRACE_SETREGS: { /* Set all gp regs in the child. */
		unsigned long tmp;
		int i;

		if (!access_ok(VERIFY_READ, datap, 16*4)) {
			ret = -EIO;
			break;
		}
		ret = 0;
		for (i = 0; i < sizeof(struct user_regs_struct32); i += sizeof(u32)) {
			ret |= __get_user(tmp, (u32 __user *)datap);
			putreg32(child, i, tmp);
			datap += sizeof(u32);
		}
		break;
	}

	case PTRACE_GETFPREGS:
		ret = -EIO;
		if (!access_ok(VERIFY_WRITE, compat_ptr(data),
			       sizeof(struct user_i387_struct)))
			break;
		save_i387_ia32(child, datap, childregs, 1);
		ret = 0;
		break;

	case PTRACE_SETFPREGS:
		ret = -EIO;
		if (!access_ok(VERIFY_READ, datap,
			       sizeof(struct user_i387_struct)))
			break;
		ret = 0;
		/* don't check EFAULT to be bug-to-bug compatible to i386 */
		restore_i387_ia32(child, datap, 1);
		break;

	case PTRACE_GETFPXREGS: {
		struct user32_fxsr_struct __user *u = datap;

		init_fpu(child);
		ret = -EIO;
		if (!access_ok(VERIFY_WRITE, u, sizeof(*u)))
			break;
		ret = -EFAULT;
		if (__copy_to_user(u, &child->thread.i387.fxsave, sizeof(*u)))
			break;
		ret = __put_user(childregs->cs, &u->fcs);
		ret |= __put_user(child->thread.ds, &u->fos);
		break;
	}

	case PTRACE_SETFPXREGS: {
		struct user32_fxsr_struct __user *u = datap;

		unlazy_fpu(child);
		ret = -EIO;
		if (!access_ok(VERIFY_READ, u, sizeof(*u)))
			break;
		/*
		 * No checking, to be bug-to-bug compatible with i386;
		 * the empty statement merely silences the warning.
		 */
		if (__copy_from_user(&child->thread.i387.fxsave, u, sizeof(*u)))
			;
		set_stopped_child_used_math(child);
		child->thread.i387.fxsave.mxcsr &= mxcsr_feature_mask;
		ret = 0;
		break;
	}

	case PTRACE_GETEVENTMSG:
		ret = put_user(child->ptrace_message,
			       (unsigned int __user *)compat_ptr(data));
		break;

	default:
		BUG();
	}

out:
	put_task_struct(child);
	return ret;
}

#endif	/* CONFIG_IA32_EMULATION */

#ifdef CONFIG_X86_32

void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code)
{
	struct siginfo info;

	tsk->thread.trap_no = 1;
	tsk->thread.error_code = error_code;

	memset(&info, 0, sizeof(info));
	info.si_signo = SIGTRAP;
	info.si_code = TRAP_BRKPT;

	/* User-mode ip? */
	info.si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;

	/* Send us the fake SIGTRAP */
	force_sig_info(SIGTRAP, &info, tsk);
}

/* notification of system call entry/exit
 * - triggered by current->work.syscall_trace
 */
__attribute__((regparm(3)))
int do_syscall_trace(struct pt_regs *regs, int entryexit)
{
	int is_sysemu = test_thread_flag(TIF_SYSCALL_EMU);
	/*
	 * With TIF_SYSCALL_EMU set we want to ignore TIF_SINGLESTEP for
	 * syscall interception.
	 */
	int is_singlestep = !is_sysemu && test_thread_flag(TIF_SINGLESTEP);
	int ret = 0;

	/* do the secure computing check first */
	if (!entryexit)
		secure_computing(regs->orig_ax);

	if (unlikely(current->audit_context)) {
		if (entryexit)
			audit_syscall_exit(AUDITSC_RESULT(regs->ax),
					   regs->ax);
		/* Debug traps, when using PTRACE_SINGLESTEP, must be sent only
		 * on the syscall exit path. Normally, when TIF_SYSCALL_AUDIT is
		 * not used, entry.S will call us only on syscall exit, not
		 * entry; so when TIF_SYSCALL_AUDIT is used we must avoid
		 * calling send_sigtrap() on syscall entry.
		 *
		 * Note that when PTRACE_SYSEMU_SINGLESTEP is used,
		 * is_singlestep is false, despite its name, so we will still do
		 * the correct thing.
		 */
		else if (is_singlestep)
			goto out;
	}

	if (!(current->ptrace & PT_PTRACED))
		goto out;

	/* If a process stops on the 1st tracepoint with SYSCALL_TRACE
	 * and then is resumed with SYSEMU_SINGLESTEP, it will come in
	 * here. We have to check this and return. */
	if (is_sysemu && entryexit)
		return 0;

	/* Fake a debug trap */
	if (is_singlestep)
		send_sigtrap(current, regs, 0);

	if (!test_thread_flag(TIF_SYSCALL_TRACE) && !is_sysemu)
		goto out;

	/* the 0x80 provides a way for the tracing parent to distinguish
	   between a syscall stop and SIGTRAP delivery */
	/* Note that the debugger could change the result of test_thread_flag!*/
	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));

	/*
	 * this isn't the same as continuing with a signal, but it will do
	 * for normal use. strace only continues with a signal if the
	 * stopping signal is not SIGTRAP. -brl
	 */
	if (current->exit_code) {
		send_sig(current->exit_code, current, 1);
		current->exit_code = 0;
	}
	ret = is_sysemu;
out:
	if (unlikely(current->audit_context) && !entryexit)
		audit_syscall_entry(AUDIT_ARCH_I386, regs->orig_ax,
				    regs->bx, regs->cx, regs->dx, regs->si);
	if (ret == 0)
		return 0;

	regs->orig_ax = -1;	/* force skip of syscall restarting */
	if (unlikely(current->audit_context))
		audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
	return 1;
}
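
/*
 * The nonzero return above (possible only with syscall emulation)
 * signals the entry.S caller to skip the actual system call, which is
 * how PTRACE_SYSEMU suppresses execution of the traced call.
 */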

#else /* CONFIG_X86_64 */

static void syscall_trace(struct pt_regs *regs)
{

#if 0
	printk("trace %s ip %lx sp %lx ax %d origrax %d caller %lx tiflags %x ptrace %x\n",
	       current->comm,
	       regs->ip, regs->sp, regs->ax, regs->orig_ax, __builtin_return_address(0),
	       current_thread_info()->flags, current->ptrace);
#endif

	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
				? 0x80 : 0));
	/*
	 * this isn't the same as continuing with a signal, but it will do
	 * for normal use. strace only continues with a signal if the
	 * stopping signal is not SIGTRAP. -brl
	 */
	if (current->exit_code) {
		send_sig(current->exit_code, current, 1);
		current->exit_code = 0;
	}
}

asmlinkage void syscall_trace_enter(struct pt_regs *regs)
{
	/* do the secure computing check first */
	secure_computing(regs->orig_ax);

	if (test_thread_flag(TIF_SYSCALL_TRACE)
	    && (current->ptrace & PT_PTRACED))
		syscall_trace(regs);

	if (unlikely(current->audit_context)) {
		if (test_thread_flag(TIF_IA32)) {
			audit_syscall_entry(AUDIT_ARCH_I386,
					    regs->orig_ax,
					    regs->bx, regs->cx,
					    regs->dx, regs->si);
		} else {
			audit_syscall_entry(AUDIT_ARCH_X86_64,
					    regs->orig_ax,
					    regs->di, regs->si,
					    regs->dx, regs->r10);
		}
	}
}

asmlinkage void syscall_trace_leave(struct pt_regs *regs)
{
	if (unlikely(current->audit_context))
		audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);

	if ((test_thread_flag(TIF_SYSCALL_TRACE)
	     || test_thread_flag(TIF_SINGLESTEP))
	    && (current->ptrace & PT_PTRACED))
		syscall_trace(regs);
}

#endif	/* CONFIG_X86_32 */