i387.h

/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */
#ifndef _ASM_X86_I387_H
#define _ASM_X86_I387_H

#ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <linux/hardirq.h>
#include <asm/system.h>

struct pt_regs;
struct user_i387_struct;

extern int init_fpu(struct task_struct *child);
extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);
extern void math_state_restore(void);

extern bool irq_fpu_usable(void);
extern void kernel_fpu_begin(void);
extern void kernel_fpu_end(void);
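
/*
 * Illustrative usage sketch (not part of this header): kernel code that
 * wants to execute FPU/SSE instructions brackets them with
 * kernel_fpu_begin()/kernel_fpu_end(), checking irq_fpu_usable() first
 * when it may run in interrupt context:
 *
 *	if (irq_fpu_usable()) {
 *		kernel_fpu_begin();
 *		... FPU/SSE instructions ...
 *		kernel_fpu_end();
 *	} else {
 *		... integer-only fallback path ...
 *	}
 */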

/*
 * Some instructions, such as VIA's padlock instructions, generate a
 * spurious DNA fault but don't modify the SSE registers. These
 * instructions are also used from interrupt context. To keep such
 * kernel FPU use in interrupt context from interacting badly with other
 * user/kernel FPU usage, they should only be used bracketed by
 * irq_ts_save()/irq_ts_restore(); see the usage sketch after the
 * helpers below.
 */
static inline int irq_ts_save(void)
{
	/*
	 * If in process context and not atomic, we can simply take a
	 * spurious DNA fault. Otherwise, doing clts() in process context
	 * requires disabling preemption or some heavy lifting like
	 * kernel_fpu_begin().
	 */
	if (!in_atomic())
		return 0;

	if (read_cr0() & X86_CR0_TS) {
		clts();
		return 1;
	}

	return 0;
}

static inline void irq_ts_restore(int TS_state)
{
	if (TS_state)
		stts();
}
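
/*
 * Illustrative usage sketch (not part of this header): a padlock-style
 * instruction that may raise a spurious DNA fault is bracketed like so:
 *
 *	int ts_state = irq_ts_save();
 *	... issue the padlock instruction ...
 *	irq_ts_restore(ts_state);
 */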

/*
 * The question "does this thread have fpu access?" is slightly racy,
 * since preemption could come in and revoke it immediately after the
 * test.
 *
 * However, even in that very unlikely scenario, we can just assume we
 * have FPU access - typically to save the FP state - and we'll just
 * take a #NM fault and get the FPU access back.
 */
static inline int user_has_fpu(void)
{
	return current->thread.fpu.has_fpu;
}
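
/*
 * Illustrative sketch (hypothetical caller, not part of this header): a
 * typical use is deciding whether a task's FPU state is live in the
 * registers or already saved in memory, e.g. when dumping it:
 *
 *	if (user_has_fpu())
 *		... copy state out of the live FPU registers ...
 *	else
 *		... copy the previously saved state from the task struct ...
 *
 * If preemption revokes FPU access between the test and the copy, the
 * first FPU instruction just takes a #NM fault and gets it back, as
 * described above.
 */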

extern void unlazy_fpu(struct task_struct *tsk);

#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_I387_H */