i387.c

/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 * Gareth Hughes <gareth@valinux.com>, May 2000
 */
#include <linux/module.h>
#include <linux/regset.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/sigcontext.h>
#include <asm/processor.h>
#include <asm/math_emu.h>
#include <asm/uaccess.h>
#include <asm/ptrace.h>
#include <asm/i387.h>
#include <asm/user.h>

#ifdef CONFIG_X86_64
# include <asm/sigcontext32.h>
# include <asm/user32.h>
#else
# define save_i387_xstate_ia32 save_i387_xstate
# define restore_i387_xstate_ia32 restore_i387_xstate
# define _fpstate_ia32 _fpstate
# define _xstate_ia32 _xstate
# define sig_xstate_ia32_size sig_xstate_size
# define fx_sw_reserved_ia32 fx_sw_reserved
# define user_i387_ia32_struct user_i387_struct
# define user32_fxsr_struct user_fxsr_struct
#endif
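
/*
 * With math emulation configured, boot_cpu_data.hard_math says whether a
 * hardware FPU is actually present; without CONFIG_MATH_EMULATION a
 * hardware FPU is assumed and HAVE_HWFP is a constant 1.
 */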
#ifdef CONFIG_MATH_EMULATION
# define HAVE_HWFP (boot_cpu_data.hard_math)
#else
# define HAVE_HWFP 1
#endif

static unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu;
unsigned int xstate_size;
EXPORT_SYMBOL_GPL(xstate_size);
unsigned int sig_xstate_ia32_size = sizeof(struct _fpstate_ia32);
static struct i387_fxsave_struct fx_scratch __cpuinitdata;
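
/*
 * Probe which MXCSR bits this CPU actually supports.  FXSAVE writes the
 * supported-bit mask into mxcsr_mask; a value of zero means the CPU
 * predates that field, in which case the default mask 0x0000ffbf is used.
 */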
void __cpuinit mxcsr_feature_mask_init(void)
{
        unsigned long mask = 0;

        clts();
        if (cpu_has_fxsr) {
                memset(&fx_scratch, 0, sizeof(struct i387_fxsave_struct));
                asm volatile("fxsave %0" : : "m" (fx_scratch));
                mask = fx_scratch.mxcsr_mask;
                if (mask == 0)
                        mask = 0x0000ffbf;
        }
        mxcsr_feature_mask &= mask;
        stts();
}
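
/*
 * Pick the size of the per-thread FPU state buffer for the features this
 * kernel will use; xsave_init() may grow xstate_size later once the XSAVE
 * feature set is known.
 */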
static void __cpuinit init_thread_xstate(void)
{
        /*
         * Note that xstate_size might be overwritten later during
         * xsave_init().
         */
        if (!HAVE_HWFP) {
                xstate_size = sizeof(struct i387_soft_struct);
                return;
        }

        if (cpu_has_fxsr)
                xstate_size = sizeof(struct i387_fxsave_struct);
#ifdef CONFIG_X86_32
        else
                xstate_size = sizeof(struct i387_fsave_struct);
#endif
}

#ifdef CONFIG_X86_64
/*
 * Called at bootup to set up the initial FPU state that is later cloned
 * into all processes.
 */
void __cpuinit fpu_init(void)
{
        unsigned long oldcr0 = read_cr0();

        set_in_cr4(X86_CR4_OSFXSR);
        set_in_cr4(X86_CR4_OSXMMEXCPT);

        write_cr0(oldcr0 & ~(X86_CR0_TS|X86_CR0_EM)); /* clear TS and EM */

        if (!smp_processor_id())
                init_thread_xstate();

        mxcsr_feature_mask_init();
        /* clean state in init */
        current_thread_info()->status = 0;
        clear_used_math();
}

#else /* CONFIG_X86_64 */

void __cpuinit fpu_init(void)
{
        if (!smp_processor_id())
                init_thread_xstate();
}

#endif /* CONFIG_X86_64 */
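
/*
 * Bring a freshly allocated FPU state area to its power-on defaults:
 * control word 0x37f (all exceptions masked), all registers tagged empty,
 * and, when XMM is available, MXCSR_DEFAULT for the SSE unit.
 */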
void fpu_finit(struct fpu *fpu)
{
#ifdef CONFIG_X86_32
        if (!HAVE_HWFP) {
                finit_soft_fpu(&fpu->state->soft);
                return;
        }
#endif

        if (cpu_has_fxsr) {
                struct i387_fxsave_struct *fx = &fpu->state->fxsave;

                memset(fx, 0, xstate_size);
                fx->cwd = 0x37f;
                if (cpu_has_xmm)
                        fx->mxcsr = MXCSR_DEFAULT;
        } else {
                struct i387_fsave_struct *fp = &fpu->state->fsave;

                memset(fp, 0, xstate_size);
                fp->cwd = 0xffff037fu;
                fp->swd = 0xffff0000u;
                fp->twd = 0xffffffffu;
                fp->fos = 0xffff0000u;
        }
}
EXPORT_SYMBOL_GPL(fpu_finit);

/*
 * The _current_ task is using the FPU for the first time, so initialize
 * it: set the mxcsr to its default reset value if we support XMM
 * instructions, and then remember that the current task has used the FPU.
 */
int init_fpu(struct task_struct *tsk)
{
        int ret;

        if (tsk_used_math(tsk)) {
                if (HAVE_HWFP && tsk == current)
                        unlazy_fpu(tsk);
                return 0;
        }

        /*
         * Memory allocation at the first usage of the FPU and other state.
         */
        ret = fpu_alloc(&tsk->thread.fpu);
        if (ret)
                return ret;

        fpu_finit(&tsk->thread.fpu);

        set_stopped_child_used_math(tsk);
        return 0;
}

/*
 * The xstateregs_active() routine is the same as the fpregs_active() routine,
 * as the "regset->n" for the xstate regset will be updated based on the
 * feature capabilities supported by the xsave.
 */
int fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
        return tsk_used_math(target) ? regset->n : 0;
}

int xfpregs_active(struct task_struct *target, const struct user_regset *regset)
{
        return (cpu_has_fxsr && tsk_used_math(target)) ? regset->n : 0;
}
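
/*
 * xfpregs_get()/xfpregs_set() copy the raw fxsave image between the task's
 * FPU state and a ptrace/core-dump regset buffer.  On the way in the mxcsr
 * is masked against mxcsr_feature_mask so user space cannot set reserved
 * bits.
 */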
int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
                unsigned int pos, unsigned int count,
                void *kbuf, void __user *ubuf)
{
        int ret;

        if (!cpu_has_fxsr)
                return -ENODEV;

        ret = init_fpu(target);
        if (ret)
                return ret;

        sanitize_i387_state(target);

        return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                   &target->thread.fpu.state->fxsave, 0, -1);
}

int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
                unsigned int pos, unsigned int count,
                const void *kbuf, const void __user *ubuf)
{
        int ret;

        if (!cpu_has_fxsr)
                return -ENODEV;

        ret = init_fpu(target);
        if (ret)
                return ret;

        sanitize_i387_state(target);

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                 &target->thread.fpu.state->fxsave, 0, -1);

        /*
         * mxcsr reserved bits must be masked to zero for security reasons.
         */
        target->thread.fpu.state->fxsave.mxcsr &= mxcsr_feature_mask;

        /*
         * update the header bits in the xsave header, indicating the
         * presence of FP and SSE state.
         */
        if (cpu_has_xsave)
                target->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE;

        return ret;
}

int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
                unsigned int pos, unsigned int count,
                void *kbuf, void __user *ubuf)
{
        int ret;

        if (!cpu_has_xsave)
                return -ENODEV;

        ret = init_fpu(target);
        if (ret)
                return ret;

        /*
         * Copy the 48 software-defined bytes first into the xstate
         * memory layout in the thread struct, so that we can copy the entire
         * xstateregs to the user using one user_regset_copyout().
         */
        memcpy(&target->thread.fpu.state->fxsave.sw_reserved,
               xstate_fx_sw_bytes, sizeof(xstate_fx_sw_bytes));

        /*
         * Copy the xstate memory layout.
         */
        ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                  &target->thread.fpu.state->xsave, 0, -1);
        return ret;
}

int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
                unsigned int pos, unsigned int count,
                const void *kbuf, const void __user *ubuf)
{
        int ret;
        struct xsave_hdr_struct *xsave_hdr;

        if (!cpu_has_xsave)
                return -ENODEV;

        ret = init_fpu(target);
        if (ret)
                return ret;

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                 &target->thread.fpu.state->xsave, 0, -1);

        /*
         * mxcsr reserved bits must be masked to zero for security reasons.
         */
        target->thread.fpu.state->fxsave.mxcsr &= mxcsr_feature_mask;

        xsave_hdr = &target->thread.fpu.state->xsave.xsave_hdr;

        xsave_hdr->xstate_bv &= pcntxt_mask;
        /*
         * These bits must be zero.
         */
        xsave_hdr->reserved1[0] = xsave_hdr->reserved1[1] = 0;

        return ret;
}

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION

/*
 * FPU tag word conversions.
 */

static inline unsigned short twd_i387_to_fxsr(unsigned short twd)
{
        unsigned int tmp; /* to avoid 16 bit prefixes in the code */

        /* Transform each pair of bits into 01 (valid) or 00 (empty) */
        tmp = ~twd;
        tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
        /* and move the valid bits to the lower byte. */
        tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
        tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
        tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */

        return tmp;
}
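
/*
 * Worked example for twd_i387_to_fxsr() above (illustrative): an i387 tag
 * word of 0xfffc marks ST(0) as valid (pair 00) and the other seven
 * registers as empty (pairs 11).  After the complement and the first mask
 * the value is 0x0001, and the shift/mask cascade compresses it to 0x01:
 * an FXSR tag byte with only bit 0 set, i.e. only ST(0) in use.
 */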

#define FPREG_ADDR(f, n) ((void *)&(f)->st_space + (n) * 16)
#define FP_EXP_TAG_VALID 0
#define FP_EXP_TAG_ZERO 1
#define FP_EXP_TAG_SPECIAL 2
#define FP_EXP_TAG_EMPTY 3
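
/*
 * The reverse direction is lossy in hardware: FXSAVE only records
 * empty/non-empty per register, so the full two-bit i387 tags are
 * reconstructed here by classifying each register's exponent and
 * significand as valid, zero or special.
 */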
static inline u32 twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave)
{
        struct _fpxreg *st;
        u32 tos = (fxsave->swd >> 11) & 7;
        u32 twd = (unsigned long) fxsave->twd;
        u32 tag;
        u32 ret = 0xffff0000u;
        int i;

        for (i = 0; i < 8; i++, twd >>= 1) {
                if (twd & 0x1) {
                        st = FPREG_ADDR(fxsave, (i - tos) & 7);

                        switch (st->exponent & 0x7fff) {
                        case 0x7fff:
                                tag = FP_EXP_TAG_SPECIAL;
                                break;
                        case 0x0000:
                                if (!st->significand[0] &&
                                    !st->significand[1] &&
                                    !st->significand[2] &&
                                    !st->significand[3])
                                        tag = FP_EXP_TAG_ZERO;
                                else
                                        tag = FP_EXP_TAG_SPECIAL;
                                break;
                        default:
                                if (st->significand[3] & 0x8000)
                                        tag = FP_EXP_TAG_VALID;
                                else
                                        tag = FP_EXP_TAG_SPECIAL;
                                break;
                        }
                } else {
                        tag = FP_EXP_TAG_EMPTY;
                }
                ret |= tag << (2 * i);
        }
        return ret;
}

/*
 * FXSR floating point environment conversions.
 */

static void
convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
{
        struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave;
        struct _fpreg *to = (struct _fpreg *) &env->st_space[0];
        struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0];
        int i;

        env->cwd = fxsave->cwd | 0xffff0000u;
        env->swd = fxsave->swd | 0xffff0000u;
        env->twd = twd_fxsr_to_i387(fxsave);

#ifdef CONFIG_X86_64
        env->fip = fxsave->rip;
        env->foo = fxsave->rdp;
        if (tsk == current) {
                /*
                 * These should actually be the ds/cs at FPU exception
                 * time, but that information is not available in
                 * 64-bit mode.
                 */
                asm("mov %%ds, %[fos]" : [fos] "=r" (env->fos));
                asm("mov %%cs, %[fcs]" : [fcs] "=r" (env->fcs));
        } else {
                struct pt_regs *regs = task_pt_regs(tsk);

                env->fos = 0xffff0000 | tsk->thread.ds;
                env->fcs = regs->cs;
        }
#else
        env->fip = fxsave->fip;
        env->fcs = (u16) fxsave->fcs | ((u32) fxsave->fop << 16);
        env->foo = fxsave->foo;
        env->fos = fxsave->fos;
#endif

        for (i = 0; i < 8; ++i)
                memcpy(&to[i], &from[i], sizeof(to[0]));
}

static void convert_to_fxsr(struct task_struct *tsk,
                            const struct user_i387_ia32_struct *env)
{
        struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave;
        struct _fpreg *from = (struct _fpreg *) &env->st_space[0];
        struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0];
        int i;

        fxsave->cwd = env->cwd;
        fxsave->swd = env->swd;
        fxsave->twd = twd_i387_to_fxsr(env->twd);
        fxsave->fop = (u16) ((u32) env->fcs >> 16);
#ifdef CONFIG_X86_64
        fxsave->rip = env->fip;
        fxsave->rdp = env->foo;
        /* cs and ds ignored */
#else
        fxsave->fip = env->fip;
        fxsave->fcs = (env->fcs & 0xffff);
        fxsave->foo = env->foo;
        fxsave->fos = env->fos;
#endif

        for (i = 0; i < 8; ++i)
                memcpy(&to[i], &from[i], sizeof(from[0]));
}
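
/*
 * fpregs_get()/fpregs_set() present the legacy 32-bit user_i387 layout to
 * ptrace.  On FXSR hardware that means converting to and from the fxsave
 * image with the helpers above; on older FPUs the fsave image is copied
 * directly, and with math emulation the soft-FPU handlers are used instead.
 */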
int fpregs_get(struct task_struct *target, const struct user_regset *regset,
               unsigned int pos, unsigned int count,
               void *kbuf, void __user *ubuf)
{
        struct user_i387_ia32_struct env;
        int ret;

        ret = init_fpu(target);
        if (ret)
                return ret;

        if (!HAVE_HWFP)
                return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);

        if (!cpu_has_fxsr) {
                return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                           &target->thread.fpu.state->fsave, 0,
                                           -1);
        }

        sanitize_i387_state(target);

        if (kbuf && pos == 0 && count == sizeof(env)) {
                convert_from_fxsr(kbuf, target);
                return 0;
        }

        convert_from_fxsr(&env, target);

        return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
}

int fpregs_set(struct task_struct *target, const struct user_regset *regset,
               unsigned int pos, unsigned int count,
               const void *kbuf, const void __user *ubuf)
{
        struct user_i387_ia32_struct env;
        int ret;

        ret = init_fpu(target);
        if (ret)
                return ret;

        sanitize_i387_state(target);

        if (!HAVE_HWFP)
                return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);

        if (!cpu_has_fxsr) {
                return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                          &target->thread.fpu.state->fsave, 0, -1);
        }

        if (pos > 0 || count < sizeof(env))
                convert_from_fxsr(&env, target);

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
        if (!ret)
                convert_to_fxsr(target, &env);

        /*
         * update the header bit in the xsave header, indicating the
         * presence of FP.
         */
        if (cpu_has_xsave)
                target->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FP;
        return ret;
}

/*
 * Signal frame handlers.
 */

static inline int save_i387_fsave(struct _fpstate_ia32 __user *buf)
{
        struct task_struct *tsk = current;
        struct i387_fsave_struct *fp = &tsk->thread.fpu.state->fsave;

        fp->status = fp->swd;
        if (__copy_to_user(buf, fp, sizeof(struct i387_fsave_struct)))
                return -1;
        return 1;
}
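
/*
 * The fxsave signal frame keeps the legacy user_i387 environment at the
 * start of the buffer (so old code can still parse it) and appends the raw
 * fxsave image at buf->_fxsr_env, tagged with X86_FXSR_MAGIC.
 */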
static int save_i387_fxsave(struct _fpstate_ia32 __user *buf)
{
        struct task_struct *tsk = current;
        struct i387_fxsave_struct *fx = &tsk->thread.fpu.state->fxsave;
        struct user_i387_ia32_struct env;
        int err = 0;

        convert_from_fxsr(&env, tsk);
        if (__copy_to_user(buf, &env, sizeof(env)))
                return -1;

        err |= __put_user(fx->swd, &buf->status);
        err |= __put_user(X86_FXSR_MAGIC, &buf->magic);
        if (err)
                return -1;

        if (__copy_to_user(&buf->_fxsr_env[0], fx, xstate_size))
                return -1;
        return 1;
}

static int save_i387_xsave(void __user *buf)
{
        struct task_struct *tsk = current;
        struct _fpstate_ia32 __user *fx = buf;
        int err = 0;

        sanitize_i387_state(tsk);

        /*
         * For legacy compatibility, we always set the FP/SSE bits in the
         * bit vector while saving the state to the user context.
         * This lets us capture, during sigreturn, any changes to the
         * FP/SSE state made by legacy applications which don't touch
         * xstate_bv in the xsave header.
         *
         * xsave-aware applications can change the xstate_bv in the xsave
         * header as well as change any contents in the memory layout.
         * xrestore as part of sigreturn will capture all the changes.
         */
        tsk->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE;

        if (save_i387_fxsave(fx) < 0)
                return -1;

        err = __copy_to_user(&fx->sw_reserved, &fx_sw_reserved_ia32,
                             sizeof(struct _fpx_sw_bytes));
        err |= __put_user(FP_XSTATE_MAGIC2,
                          (__u32 __user *) (buf + sig_xstate_ia32_size
                                            - FP_XSTATE_MAGIC2_SIZE));
        if (err)
                return -1;

        return 1;
}

int save_i387_xstate_ia32(void __user *buf)
{
        struct _fpstate_ia32 __user *fp = (struct _fpstate_ia32 __user *) buf;
        struct task_struct *tsk = current;

        if (!used_math())
                return 0;

        if (!access_ok(VERIFY_WRITE, buf, sig_xstate_ia32_size))
                return -EACCES;
        /*
         * This will cause a "finit" to be triggered by the next
         * attempted FPU operation by the 'current' process.
         */
        clear_used_math();

        if (!HAVE_HWFP) {
                return fpregs_soft_get(current, NULL,
                                       0, sizeof(struct user_i387_ia32_struct),
                                       NULL, fp) ? -1 : 1;
        }

        unlazy_fpu(tsk);

        if (cpu_has_xsave)
                return save_i387_xsave(fp);
        if (cpu_has_fxsr)
                return save_i387_fxsave(fp);
        else
                return save_i387_fsave(fp);
}
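
/*
 * Sigreturn side: the restore_* helpers below mirror the save paths above,
 * pulling the state back out of the user-space signal frame and sanitizing
 * anything (mxcsr reserved bits, xstate_bv) that user space must not be
 * able to set arbitrarily.
 */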

static inline int restore_i387_fsave(struct _fpstate_ia32 __user *buf)
{
        struct task_struct *tsk = current;

        return __copy_from_user(&tsk->thread.fpu.state->fsave, buf,
                                sizeof(struct i387_fsave_struct));
}

static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf,
                               unsigned int size)
{
        struct task_struct *tsk = current;
        struct user_i387_ia32_struct env;
        int err;

        err = __copy_from_user(&tsk->thread.fpu.state->fxsave,
                               &buf->_fxsr_env[0], size);
        /* mxcsr reserved bits must be masked to zero for security reasons */
        tsk->thread.fpu.state->fxsave.mxcsr &= mxcsr_feature_mask;
        if (err || __copy_from_user(&env, buf, sizeof(env)))
                return 1;
        convert_to_fxsr(tsk, &env);

        return 0;
}

static int restore_i387_xsave(void __user *buf)
{
        struct _fpx_sw_bytes fx_sw_user;
        struct _fpstate_ia32 __user *fx_user =
                        ((struct _fpstate_ia32 __user *) buf);
        struct i387_fxsave_struct __user *fx =
                (struct i387_fxsave_struct __user *) &fx_user->_fxsr_env[0];
        struct xsave_hdr_struct *xsave_hdr =
                                &current->thread.fpu.state->xsave.xsave_hdr;
        u64 mask;
        int err;

        if (check_for_xstate(fx, buf, &fx_sw_user))
                goto fx_only;

        mask = fx_sw_user.xstate_bv;

        err = restore_i387_fxsave(buf, fx_sw_user.xstate_size);

        xsave_hdr->xstate_bv &= pcntxt_mask;
        /*
         * These bits must be zero.
         */
        xsave_hdr->reserved1[0] = xsave_hdr->reserved1[1] = 0;

        /*
         * Init the state that is not present in the memory layout
         * and enabled by the OS.
         */
        mask = ~(pcntxt_mask & ~mask);
        xsave_hdr->xstate_bv &= mask;

        return err;
fx_only:
        /*
         * Couldn't find the extended state information in the memory
         * layout. Restore the FP/SSE and init the other extended state
         * enabled by the OS.
         */
        xsave_hdr->xstate_bv = XSTATE_FPSSE;
        return restore_i387_fxsave(buf, sizeof(struct i387_fxsave_struct));
}
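
/*
 * Top-level sigreturn entry point for the 32-bit frame: a NULL buf simply
 * drops any existing FPU state, math emulation goes through
 * fpregs_soft_set(), and hardware FP picks the widest restore routine the
 * CPU supports (xsave, fxsave or fsave).
 */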
int restore_i387_xstate_ia32(void __user *buf)
{
        int err;
        struct task_struct *tsk = current;
        struct _fpstate_ia32 __user *fp = (struct _fpstate_ia32 __user *) buf;

        if (HAVE_HWFP)
                clear_fpu(tsk);

        if (!buf) {
                if (used_math()) {
                        clear_fpu(tsk);
                        clear_used_math();
                }

                return 0;
        } else
                if (!access_ok(VERIFY_READ, buf, sig_xstate_ia32_size))
                        return -EACCES;

        if (!used_math()) {
                err = init_fpu(tsk);
                if (err)
                        return err;
        }

        if (HAVE_HWFP) {
                if (cpu_has_xsave)
                        err = restore_i387_xsave(buf);
                else if (cpu_has_fxsr)
                        err = restore_i387_fxsave(fp,
                                        sizeof(struct i387_fxsave_struct));
                else
                        err = restore_i387_fsave(fp);
        } else {
                err = fpregs_soft_set(current, NULL,
                                      0, sizeof(struct user_i387_ia32_struct),
                                      NULL, fp) != 0;
        }
        set_used_math();

        return err;
}

/*
 * FPU state for core dumps.
 * This is only used for a.out dumps now.
 * It is declared generically using elf_fpregset_t (which is
 * struct user_i387_struct) but is in fact only used for 32-bit
 * dumps, so on 64-bit it is really struct user_i387_ia32_struct.
 */
int dump_fpu(struct pt_regs *regs, struct user_i387_struct *fpu)
{
        struct task_struct *tsk = current;
        int fpvalid;

        fpvalid = !!used_math();
        if (fpvalid)
                fpvalid = !fpregs_get(tsk, NULL,
                                      0, sizeof(struct user_i387_ia32_struct),
                                      fpu, NULL);

        return fpvalid;
}
EXPORT_SYMBOL(dump_fpu);

#endif /* CONFIG_X86_32 || CONFIG_IA32_EMULATION */