@@ -108,15 +108,6 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
 		dec_preempt_count();
 }
 
-#ifdef CONFIG_X86_32
-static inline void
-die_if_kernel(const char *str, struct pt_regs *regs, long err)
-{
-	if (!user_mode_vm(regs))
-		die(str, regs, err);
-}
-#endif
-
 static void __kprobes
 do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
 	long error_code, siginfo_t *info)
@@ -729,30 +720,14 @@ do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
 	conditional_sti(regs);
 
 #ifdef CONFIG_X86_32
-	if (cpu_has_xmm) {
-		/* Handle SIMD FPU exceptions on PIII+ processors. */
-		ignore_fpu_irq = 1;
-		simd_math_error((void __user *)regs->ip);
-		return;
-	}
-	/*
-	 * Handle strange cache flush from user space exception
-	 * in all other cases. This is undocumented behaviour.
-	 */
-	if (regs->flags & X86_VM_MASK) {
-		handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
-		return;
-	}
-	current->thread.trap_no = 19;
-	current->thread.error_code = error_code;
-	die_if_kernel("cache flush denied", regs, error_code);
-	force_sig(SIGSEGV, current);
+	ignore_fpu_irq = 1;
 #else
 	if (!user_mode(regs) &&
 			kernel_math_error(regs, "kernel simd math error", 19))
 		return;
-	simd_math_error((void __user *)regs->ip);
 #endif
+
+	simd_math_error((void __user *)regs->ip);
 }
 
 dotraplinkage void