i387.h

/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */

#ifndef _ASM_X86_I387_H
#define _ASM_X86_I387_H

#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/regset.h>
#include <linux/hardirq.h>
#include <asm/asm.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
#include <asm/user.h>
#include <asm/uaccess.h>

extern void fpu_init(void);
extern void mxcsr_feature_mask_init(void);
extern int init_fpu(struct task_struct *child);
extern asmlinkage void math_state_restore(void);
extern void init_thread_xstate(void);

extern user_regset_active_fn fpregs_active, xfpregs_active;
extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get;
extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set;

#ifdef CONFIG_IA32_EMULATION
struct _fpstate_ia32;
extern int save_i387_ia32(struct _fpstate_ia32 __user *buf);
extern int restore_i387_ia32(struct _fpstate_ia32 __user *buf);
#endif

#ifdef CONFIG_X86_64

/* Ignore delayed exceptions from user space */
static inline void tolerant_fwait(void)
{
        asm volatile("1:        fwait\n"
                     "2:\n"
                     _ASM_EXTABLE(1b, 2b));
}
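
/*
 * Added note (my summary, based on the kernel's standard exception-table
 * mechanism, not original text): _ASM_EXTABLE(1b, 2b) records a fixup
 * entry mapping the fwait at label 1 to label 2, so a delayed x87
 * exception delivered by the fwait is swallowed by the fault handler
 * and execution simply resumes after the instruction.
 */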

static inline int restore_fpu_checking(struct i387_fxsave_struct *fx)
{
        int err;

        asm volatile("1:        rex64/fxrstor (%[fx])\n\t"
                     "2:\n"
                     ".section .fixup,\"ax\"\n"
                     "3:        movl $-1,%[err]\n"
                     "          jmp 2b\n"
                     ".previous\n"
                     _ASM_EXTABLE(1b, 3b)
                     : [err] "=r" (err)
#if 0 /* See comment in __save_init_fpu() below. */
                     : [fx] "r" (fx), "m" (*fx), "0" (0));
#else
                     : [fx] "cdaSDb" (fx), "m" (*fx), "0" (0));
#endif
        if (unlikely(err))
                init_fpu(current);
        return err;
}
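
/*
 * Hedged usage sketch (illustrative, not part of the original header):
 * a device-not-available trap path restoring the current task's saved
 * FXSAVE image would call the helper like this, relying on the
 * init_fpu() fallback above if the image turns out to be corrupt.
 */
static inline void example_restore_current_fpu(void)
{
        /* example_restore_current_fpu() is a hypothetical caller */
        restore_fpu_checking(&current->thread.xstate->fxsave);
}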

#define X87_FSW_ES (1 << 7)	/* Exception Summary */

/* AMD CPUs don't save/restore FDP/FIP/FOP unless an exception is
   pending.  Clear the x87 state here by setting it to fixed values.
   The kernel data segment can sometimes be 0 and sometimes the new
   user value; both should be OK.  Use the PDA as the safe address
   because it should already be in L1. */
static inline void clear_fpu_state(struct i387_fxsave_struct *fx)
{
        if (unlikely(fx->swd & X87_FSW_ES))
                asm volatile("fnclex");
        alternative_input(ASM_NOP8 ASM_NOP2,
                          " emms\n"		/* clear stack tags */
                          " fildl %%gs:0",	/* load to clear state */
                          X86_FEATURE_FXSAVE_LEAK);
}

static inline int save_i387_checking(struct i387_fxsave_struct __user *fx)
{
        int err;

        asm volatile("1:        rex64/fxsave (%[fx])\n\t"
                     "2:\n"
                     ".section .fixup,\"ax\"\n"
                     "3:        movl $-1,%[err]\n"
                     "          jmp 2b\n"
                     ".previous\n"
                     _ASM_EXTABLE(1b, 3b)
                     : [err] "=r" (err), "=m" (*fx)
#if 0 /* See comment in __save_init_fpu() below. */
                     : [fx] "r" (fx), "0" (0));
#else
                     : [fx] "cdaSDb" (fx), "0" (0));
#endif
        if (unlikely(err) &&
            __clear_user(fx, sizeof(struct i387_fxsave_struct)))
                err = -EFAULT;
        /* No need to clear here because the caller clears USED_MATH */
        return err;
}
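
/*
 * Hedged usage sketch (illustrative, not from the original file): a
 * signal-setup path saving the FPU image straight into a user-mode
 * frame.  'frame' is a hypothetical user pointer; on a fault the
 * helper has already cleared the buffer and the -EFAULT is simply
 * propagated to the caller.
 */
static inline int example_save_to_sigframe(struct i387_fxsave_struct __user *frame)
{
        return save_i387_checking(frame);
}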

static inline void __save_init_fpu(struct task_struct *tsk)
{
        /* Using "rex64; fxsave %0" is broken because, if the memory
           operand uses any extended registers for addressing, a second
           REX prefix will be generated (to the assembler, rex64
           followed by semicolon is a separate instruction), and hence
           the 64-bitness is lost. */
#if 0
        /* Using "fxsaveq %0" would be the ideal choice, but is only
           supported starting with gas 2.16. */
        __asm__ __volatile__("fxsaveq %0"
                             : "=m" (tsk->thread.xstate->fxsave));
#elif 0
        /* Using, as a workaround, the properly prefixed form below
           isn't accepted by any binutils version so far released,
           complaining that the same type of prefix is used twice if
           an extended register is needed for addressing (fix submitted
           to mainline 2005-11-21). */
        __asm__ __volatile__("rex64/fxsave %0"
                             : "=m" (tsk->thread.xstate->fxsave));
#else
        /* This, however, we can work around by forcing the compiler to
           select an addressing mode that doesn't require extended
           registers. */
        __asm__ __volatile__("rex64/fxsave (%1)"
                             : "=m" (tsk->thread.xstate->fxsave)
                             : "cdaSDb" (&tsk->thread.xstate->fxsave));
#endif
        clear_fpu_state(&tsk->thread.xstate->fxsave);
        task_thread_info(tsk)->status &= ~TS_USEDFPU;
}
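
/*
 * Added illustration (my summary of the breakage described above, an
 * assumption about the exact encoding, not original text): with an
 * extended base register such as %r8,
 *
 *	rex64; fxsave (%r8)
 *
 * assembles to roughly 48 41 0f ae 00.  The REX.B prefix that %r8
 * requires lands between the rex64 byte (0x48) and the opcode, and the
 * CPU honors only the REX prefix immediately preceding the opcode, so
 * REX.W -- the 64-bitness -- is silently dropped.  The "cdaSDb"
 * constraint used above sidesteps this by allowing only the legacy
 * registers (%rcx, %rdx, %rax, %rsi, %rdi, %rbx) for the address, so a
 * single REX.W prefix suffices.
 */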

#else	/* CONFIG_X86_32 */

extern void finit(void);

static inline void tolerant_fwait(void)
{
        asm volatile("fnclex ; fwait");
}

static inline void restore_fpu(struct task_struct *tsk)
{
        /*
         * The "nop" is needed to make the instructions the same
         * length.
         */
        alternative_input(
                "nop ; frstor %1",
                "fxrstor %1",
                X86_FEATURE_FXSR,
                "m" (tsk->thread.xstate->fxsave));
}

/* We need a safe address that is cheap to find and that is already
   in L1 during context switch.  The best choices are unfortunately
   different for UP and SMP. */
#ifdef CONFIG_SMP
#define safe_address (__per_cpu_offset[0])
#else
#define safe_address (kstat_cpu(0).cpustat.user)
#endif

/*
 * These must be called with preempt disabled
 */
static inline void __save_init_fpu(struct task_struct *tsk)
{
        /* Use more nops than strictly needed in case the compiler
           varies the generated code. */
        alternative_input(
                "fnsave %[fx] ;fwait;" GENERIC_NOP8 GENERIC_NOP4,
                "fxsave %[fx]\n"
                /* bt $7 tests the ES (exception summary) bit of the
                   status word; run fnclex only if it is set. */
                "bt $7,%[fsw] ; jnc 1f ; fnclex\n1:",
                X86_FEATURE_FXSR,
                [fx] "m" (tsk->thread.xstate->fxsave),
                [fsw] "m" (tsk->thread.xstate->fxsave.swd) : "memory");
        /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an
           exception is pending.  Clear the x87 state here by setting
           it to fixed values.  safe_address is a random variable that
           should be in L1. */
        alternative_input(
                GENERIC_NOP8 GENERIC_NOP2,
                "emms\n\t"		/* clear stack tags */
                "fildl %[addr]",	/* set F?P to defined value */
                X86_FEATURE_FXSAVE_LEAK,
                [addr] "m" (safe_address));
        task_thread_info(tsk)->status &= ~TS_USEDFPU;
}

/*
 * Signal frame handlers...
 */
extern int save_i387(struct _fpstate __user *buf);
extern int restore_i387(struct _fpstate __user *buf);

#endif	/* CONFIG_X86_64 */

static inline void __unlazy_fpu(struct task_struct *tsk)
{
        if (task_thread_info(tsk)->status & TS_USEDFPU) {
                __save_init_fpu(tsk);
                stts();
        } else
                tsk->fpu_counter = 0;
}

static inline void __clear_fpu(struct task_struct *tsk)
{
        if (task_thread_info(tsk)->status & TS_USEDFPU) {
                tolerant_fwait();
                task_thread_info(tsk)->status &= ~TS_USEDFPU;
                stts();
        }
}

static inline void kernel_fpu_begin(void)
{
        struct thread_info *me = current_thread_info();

        preempt_disable();
        if (me->status & TS_USEDFPU)
                __save_init_fpu(me->task);
        else
                clts();
}

static inline void kernel_fpu_end(void)
{
        stts();
        preempt_enable();
}
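
/*
 * Hedged usage sketch (illustrative, not from the original file): any
 * kernel code that touches FPU/SSE registers must bracket the region
 * with kernel_fpu_begin()/kernel_fpu_end(), so the user's lazily held
 * state is saved first and TS is set again afterwards.  The body here
 * is a placeholder.
 */
static inline void example_kernel_sse_user(void)
{
        kernel_fpu_begin();
        /* ...SSE-accelerated work would go here... */
        kernel_fpu_end();
}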

/*
 * Some instructions, like VIA's padlock instructions, generate a
 * spurious DNA fault but don't modify SSE registers.  And these
 * instructions get used from interrupt context as well.  To prevent
 * these kernel instructions in interrupt context from interacting
 * wrongly with other user/kernel FPU usage, they should only be used
 * in the context of irq_ts_save()/irq_ts_restore().
 */
static inline int irq_ts_save(void)
{
        /*
         * If we are in process context, we are OK to take a spurious
         * DNA fault.  Otherwise, doing clts() in process context
         * requires preemption to be disabled or some heavy lifting
         * like kernel_fpu_begin().
         */
        if (!in_interrupt())
                return 0;

        if (read_cr0() & X86_CR0_TS) {
                clts();
                return 1;
        }

        return 0;
}

static inline void irq_ts_restore(int TS_state)
{
        if (TS_state)
                stts();
}
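
/*
 * Hedged usage sketch (illustrative): wrapping a padlock-style
 * instruction that may run in interrupt context.  The commented-out
 * do_padlock_op() is a hypothetical stand-in for the real instruction
 * sequence.
 */
static inline void example_padlock_call(void)
{
        int ts = irq_ts_save();	/* clear TS only if needed in irq context */

        /* do_padlock_op(); -- hypothetical spurious-DNA-raising insn */

        irq_ts_restore(ts);	/* put TS back exactly as we found it */
}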

#ifdef CONFIG_X86_64

static inline void save_init_fpu(struct task_struct *tsk)
{
        __save_init_fpu(tsk);
        stts();
}

#define unlazy_fpu	__unlazy_fpu
#define clear_fpu	__clear_fpu

#else	/* CONFIG_X86_32 */

/*
 * These disable preemption on their own and are safe
 */
static inline void save_init_fpu(struct task_struct *tsk)
{
        preempt_disable();
        __save_init_fpu(tsk);
        stts();
        preempt_enable();
}

static inline void unlazy_fpu(struct task_struct *tsk)
{
        preempt_disable();
        __unlazy_fpu(tsk);
        preempt_enable();
}

static inline void clear_fpu(struct task_struct *tsk)
{
        preempt_disable();
        __clear_fpu(tsk);
        preempt_enable();
}

#endif	/* CONFIG_X86_64 */

/*
 * i387 state interaction
 */
static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
{
        if (cpu_has_fxsr) {
                return tsk->thread.xstate->fxsave.cwd;
        } else {
                return (unsigned short)tsk->thread.xstate->fsave.cwd;
        }
}

static inline unsigned short get_fpu_swd(struct task_struct *tsk)
{
        if (cpu_has_fxsr) {
                return tsk->thread.xstate->fxsave.swd;
        } else {
                return (unsigned short)tsk->thread.xstate->fsave.swd;
        }
}

static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
{
        if (cpu_has_xmm) {
                return tsk->thread.xstate->fxsave.mxcsr;
        } else {
                return MXCSR_DEFAULT;
        }
}
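
/*
 * Hedged usage sketch (illustrative, modeled on the kernel's x87 fault
 * handling): the status word records which exceptions occurred and the
 * control word masks them, so swd & ~cwd over the low six bits isolates
 * the unmasked exception(s) that actually fired.
 */
static inline unsigned short example_unmasked_x87_exceptions(struct task_struct *tsk)
{
        unsigned short cwd = get_fpu_cwd(tsk);
        unsigned short swd = get_fpu_swd(tsk);

        return swd & ~cwd & 0x3f;	/* IE/DE/ZE/OE/UE/PE flags */
}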

#endif	/* _ASM_X86_I387_H */