traps.c

/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/io.h>

#ifdef CONFIG_EISA
#include <linux/ioport.h>
#include <linux/eisa.h>
#endif

#ifdef CONFIG_MCA
#include <linux/mca.h>
#endif

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

#include <asm/kmemcheck.h>
#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <linux/atomic.h>
#include <asm/ftrace.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/fpu-internal.h>
#include <asm/mce.h>

#include <asm/mach_traps.h>

#ifdef CONFIG_X86_64
#include <asm/x86_init.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>
#else
#include <asm/processor-flags.h>
#include <asm/setup.h>

asmlinkage int system_call(void);

/* Do we ignore FPU interrupts ? */
char ignore_fpu_irq;

/*
 * The IDT has to be page-aligned to simplify the Pentium
 * F0 0F bug workaround.
 */
gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
#endif
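
/*
 * Tracks which IDT vectors are already claimed (CPU exceptions and the
 * system call vector(s)), so they are not handed out as device
 * interrupt vectors.
 */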
DECLARE_BITMAP(used_vectors, NR_VECTORS);
EXPORT_SYMBOL_GPL(used_vectors);
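
/*
 * These helpers mirror the interrupted context's IF flag: interrupts are
 * re-enabled (or re-disabled) only if they were enabled in the saved
 * flags.  The preempt_ variants additionally bump the preempt count so
 * that handlers running on an IST stack cannot be scheduled away while
 * that stack is in use.
 */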
static inline void conditional_sti(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void preempt_conditional_sti(struct pt_regs *regs)
{
	inc_preempt_count();
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void conditional_cli(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_disable();
}

static inline void preempt_conditional_cli(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_disable();
	dec_preempt_count();
}

static void __kprobes
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
	long error_code, siginfo_t *info)
{
	struct task_struct *tsk = current;

#ifdef CONFIG_X86_32
	if (regs->flags & X86_VM_MASK) {
		/*
		 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
		 * On nmi (interrupt 2), do_trap should not be called.
		 */
		if (trapnr < X86_TRAP_UD)
			goto vm86_trap;
		goto trap_signal;
	}
#endif

	if (!user_mode(regs))
		goto kernel_trap;

#ifdef CONFIG_X86_32
trap_signal:
#endif
	/*
	 * We want error_code and trap_nr set for userspace faults and
	 * kernelspace faults which result in die(), but not
	 * kernelspace faults which are fixed up.  die() gives the
	 * process no chance to handle the signal and notice the
	 * kernel fault information, so that won't result in polluting
	 * the information about previously queued, but not yet
	 * delivered, faults.  See also do_general_protection below.
	 */
	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = trapnr;

#ifdef CONFIG_X86_64
	if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
	    printk_ratelimit()) {
		printk(KERN_INFO
		       "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
		       tsk->comm, tsk->pid, str,
		       regs->ip, regs->sp, error_code);
		print_vma_addr(" in ", regs->ip);
		printk("\n");
	}
#endif

	if (info)
		force_sig_info(signr, info, tsk);
	else
		force_sig(signr, tsk);
	return;

kernel_trap:
	if (!fixup_exception(regs)) {
		tsk->thread.error_code = error_code;
		tsk->thread.trap_nr = trapnr;
		die(str, regs, error_code);
	}
	return;

#ifdef CONFIG_X86_32
vm86_trap:
	if (handle_vm86_trap((struct kernel_vm86_regs *) regs,
						error_code, trapnr))
		goto trap_signal;
	return;
#endif
}
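
/*
 * DO_ERROR and DO_ERROR_INFO stamp out the simple exception handlers:
 * notify the die chain, conditionally re-enable interrupts, then hand
 * off to do_trap().  DO_ERROR_INFO additionally fills in a siginfo for
 * the signal that is delivered.
 */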
#define DO_ERROR(trapnr, signr, str, name)				\
dotraplinkage void do_##name(struct pt_regs *regs, long error_code)	\
{									\
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr)	\
							== NOTIFY_STOP)	\
		return;							\
	conditional_sti(regs);						\
	do_trap(trapnr, signr, str, regs, error_code, NULL);		\
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr)		\
dotraplinkage void do_##name(struct pt_regs *regs, long error_code)	\
{									\
	siginfo_t info;							\
	info.si_signo = signr;						\
	info.si_errno = 0;						\
	info.si_code = sicode;						\
	info.si_addr = (void __user *)siaddr;				\
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr)	\
							== NOTIFY_STOP)	\
		return;							\
	conditional_sti(regs);						\
	do_trap(trapnr, signr, str, regs, error_code, &info);		\
}

DO_ERROR_INFO(X86_TRAP_DE, SIGFPE, "divide error", divide_error, FPE_INTDIV,
		regs->ip)
DO_ERROR(X86_TRAP_OF, SIGSEGV, "overflow", overflow)
DO_ERROR(X86_TRAP_BR, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN,
		regs->ip)
DO_ERROR(X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun",
		coprocessor_segment_overrun)
DO_ERROR(X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present)
#ifdef CONFIG_X86_32
DO_ERROR(X86_TRAP_SS, SIGBUS, "stack segment", stack_segment)
#endif
DO_ERROR_INFO(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check,
		BUS_ADRALN, 0)

#ifdef CONFIG_X86_64
/* Runs on IST stack */
dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code)
{
	if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
			X86_TRAP_SS, SIGBUS) == NOTIFY_STOP)
		return;
	preempt_conditional_sti(regs);
	do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL);
	preempt_conditional_cli(regs);
}

dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
{
	static const char str[] = "double fault";
	struct task_struct *tsk = current;
	/* Return value not checked because a double fault cannot be ignored */
	notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_DF;

	/*
	 * This is always a kernel trap and never fixable (and thus must
	 * never return).
	 */
	for (;;)
		die(str, regs, error_code);
}
#endif
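
/*
 * General protection fault (#GP): raised for privilege and segment
 * violations.  User-mode faults get a SIGSEGV; kernel-mode faults are
 * either fixed up via the exception table or end in die().
 */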
dotraplinkage void __kprobes
do_general_protection(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk;

	conditional_sti(regs);

#ifdef CONFIG_X86_32
	if (regs->flags & X86_VM_MASK)
		goto gp_in_vm86;
#endif

	tsk = current;
	if (!user_mode(regs))
		goto gp_in_kernel;

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_GP;

	if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
	    printk_ratelimit()) {
		printk(KERN_INFO
			"%s[%d] general protection ip:%lx sp:%lx error:%lx",
			tsk->comm, task_pid_nr(tsk),
			regs->ip, regs->sp, error_code);
		print_vma_addr(" in ", regs->ip);
		printk("\n");
	}

	force_sig(SIGSEGV, tsk);
	return;

#ifdef CONFIG_X86_32
gp_in_vm86:
	local_irq_enable();
	handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
	return;
#endif

gp_in_kernel:
	if (fixup_exception(regs))
		return;

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_GP;
	if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
			X86_TRAP_GP, SIGSEGV) == NOTIFY_STOP)
		return;
	die("general protection fault", regs, error_code);
}
/* May run on IST stack. */
dotraplinkage void __kprobes notrace do_int3(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_DYNAMIC_FTRACE
	/* ftrace must be first, everything else may cause a recursive crash */
	if (unlikely(modifying_ftrace_code) && ftrace_int3_handler(regs))
		return;
#endif
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
	if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
				SIGTRAP) == NOTIFY_STOP)
		return;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */

	if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
			SIGTRAP) == NOTIFY_STOP)
		return;

	/*
	 * Let others (NMI) know that the debug stack is in use
	 * as we may switch to the interrupt stack.
	 */
	debug_stack_usage_inc();
	preempt_conditional_sti(regs);
	do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
	preempt_conditional_cli(regs);
	debug_stack_usage_dec();
}
#ifdef CONFIG_X86_64
/*
 * Help handler running on IST stack to switch back to user stack
 * for scheduling or signal handling. The actual stack switch is done in
 * entry.S
 */
asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
{
	struct pt_regs *regs = eregs;
	/* Did already sync */
	if (eregs == (struct pt_regs *)eregs->sp)
		;
	/* Exception from user space */
	else if (user_mode(eregs))
		regs = task_pt_regs(current);
	/*
	 * Exception from kernel and interrupts are enabled. Move to
	 * kernel process stack.
	 */
	else if (eregs->flags & X86_EFLAGS_IF)
		regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs));
	if (eregs != regs)
		*regs = *eregs;
	return regs;
}
#endif
/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 *
 * May run on IST stack.
 */
dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk = current;
	int user_icebp = 0;
	unsigned long dr6;
	int si_code;

	get_debugreg(dr6, 6);

	/* Filter out all the reserved bits which are preset to 1 */
	dr6 &= ~DR6_RESERVED;

	/*
	 * If dr6 has no reason to give us about the origin of this trap,
	 * then it's very likely the result of an icebp/int01 trap.
	 * User wants a sigtrap for that.
	 */
	if (!dr6 && user_mode(regs))
		user_icebp = 1;

	/* Catch kmemcheck conditions first of all! */
	if ((dr6 & DR_STEP) && kmemcheck_trap(regs))
		return;

	/* DR6 may or may not be cleared by the CPU */
	set_debugreg(0, 6);

	/*
	 * The processor cleared BTF, so don't mark that we need it set.
	 */
	clear_tsk_thread_flag(tsk, TIF_BLOCKSTEP);

	/* Store the virtualized DR6 value */
	tsk->thread.debugreg6 = dr6;
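
	/*
	 * The address of the local dr6 copy is smuggled to the notifier
	 * chain through the "err" argument (hence the PTR_ERR() cast), so
	 * that debuggers on the chain can inspect or modify it.
	 */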
	if (notify_die(DIE_DEBUG, "debug", regs, PTR_ERR(&dr6), error_code,
							SIGTRAP) == NOTIFY_STOP)
		return;

	/*
	 * Let others (NMI) know that the debug stack is in use
	 * as we may switch to the interrupt stack.
	 */
	debug_stack_usage_inc();

	/* It's safe to allow irq's after DR6 has been saved */
	preempt_conditional_sti(regs);

	if (regs->flags & X86_VM_MASK) {
		handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
				X86_TRAP_DB);
		preempt_conditional_cli(regs);
		debug_stack_usage_dec();
		return;
	}

	/*
	 * Single-stepping through system calls: ignore any exceptions in
	 * kernel space, but re-enable TF when returning to user mode.
	 *
	 * We already checked v86 mode above, so we can check for kernel mode
	 * by just checking the CPL of CS.
	 */
	if ((dr6 & DR_STEP) && !user_mode(regs)) {
		tsk->thread.debugreg6 &= ~DR_STEP;
		set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
		regs->flags &= ~X86_EFLAGS_TF;
	}

	si_code = get_si_code(tsk->thread.debugreg6);
	if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
		send_sigtrap(tsk, regs, error_code, si_code);
	preempt_conditional_cli(regs);
	debug_stack_usage_dec();

	return;
}
/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
void math_error(struct pt_regs *regs, int error_code, int trapnr)
{
	struct task_struct *task = current;
	siginfo_t info;
	unsigned short err;
	char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
						"simd exception";

	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
		return;
	conditional_sti(regs);

	if (!user_mode_vm(regs)) {
		if (!fixup_exception(regs)) {
			task->thread.error_code = error_code;
			task->thread.trap_nr = trapnr;
			die(str, regs, error_code);
		}
		return;
	}

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	save_init_fpu(task);
	task->thread.trap_nr = trapnr;
	task->thread.error_code = error_code;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_addr = (void __user *)regs->ip;
	if (trapnr == X86_TRAP_MF) {
		unsigned short cwd, swd;
		/*
		 * (~cwd & swd) will mask out exceptions that are not set to unmasked
		 * status.  0x3f is the exception bits in these regs, 0x200 is the
		 * C1 reg you need in case of a stack fault, 0x040 is the stack
		 * fault bit.  We should only be taking one exception at a time,
		 * so if this combination doesn't produce any single exception,
		 * then we have a bad program that isn't synchronizing its FPU usage
		 * and it will suffer the consequences since we won't be able to
		 * fully reproduce the context of the exception
		 */
		cwd = get_fpu_cwd(task);
		swd = get_fpu_swd(task);

		err = swd & ~cwd;
	} else {
		/*
		 * The SIMD FPU exceptions are handled a little differently, as there
		 * is only a single status/control register.  Thus, to determine which
		 * unmasked exception was caught we must mask the exception mask bits
		 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
		 */
		unsigned short mxcsr = get_fpu_mxcsr(task);
		err = ~(mxcsr >> 7) & mxcsr;
	}

	if (err & 0x001) {	/* Invalid op */
		/*
		 * swd & 0x240 == 0x040: Stack Underflow
		 * swd & 0x240 == 0x240: Stack Overflow
		 * User must clear the SF bit (0x40) if set
		 */
		info.si_code = FPE_FLTINV;
	} else if (err & 0x004) { /* Divide by Zero */
		info.si_code = FPE_FLTDIV;
	} else if (err & 0x008) { /* Overflow */
		info.si_code = FPE_FLTOVF;
	} else if (err & 0x012) { /* Denormal, Underflow */
		info.si_code = FPE_FLTUND;
	} else if (err & 0x020) { /* Precision */
		info.si_code = FPE_FLTRES;
	} else {
		/*
		 * If we're using IRQ 13, or supposedly even some trap
		 * X86_TRAP_MF implementations, it's possible
		 * we get a spurious trap, which is not an error.
		 */
		return;
	}
	force_sig_info(SIGFPE, &info, task);
}
dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_X86_32
	ignore_fpu_irq = 1;
#endif

	math_error(regs, error_code, X86_TRAP_MF);
}

dotraplinkage void
do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
{
	math_error(regs, error_code, X86_TRAP_XF);
}

dotraplinkage void
do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
{
	conditional_sti(regs);
#if 0
	/* No need to warn about this any longer. */
	printk(KERN_INFO "Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
#endif
}

asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
{
}

asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
{
}
/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 *
 * Must be called with kernel preemption disabled (e.g. with local
 * interrupts disabled, as in the case of do_device_not_available).
 */
void math_state_restore(void)
{
	struct task_struct *tsk = current;

	if (!tsk_used_math(tsk)) {
		local_irq_enable();
		/*
		 * does a slab alloc which can sleep
		 */
		if (init_fpu(tsk)) {
			/*
			 * ran out of memory!
			 */
			do_group_exit(SIGKILL);
			return;
		}
		local_irq_disable();
	}

	__thread_fpu_begin(tsk);
	/*
	 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
	 */
	if (unlikely(restore_fpu_checking(tsk))) {
		__thread_fpu_end(tsk);
		force_sig(SIGSEGV, tsk);
		return;
	}
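
	/*
	 * fpu_counter counts consecutive context switches in which this
	 * task used the FPU; the switch code consults it when deciding
	 * whether to preload the FPU state eagerly.
	 */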
	tsk->fpu_counter++;
}
EXPORT_SYMBOL_GPL(math_state_restore);
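
/*
 * Device-not-available (#NM): raised on the first FPU/SIMD instruction
 * executed while CR0.TS is set (or with CR0.EM for math emulation).
 * Either emulate the instruction or lazily restore the task's FPU state.
 */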
dotraplinkage void __kprobes
do_device_not_available(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_MATH_EMULATION
	if (read_cr0() & X86_CR0_EM) {
		struct math_emu_info info = { };

		conditional_sti(regs);

		info.regs = regs;
		math_emulate(&info);
		return;
	}
#endif
	math_state_restore(); /* interrupts still off */
#ifdef CONFIG_X86_32
	conditional_sti(regs);
#endif
}

#ifdef CONFIG_X86_32
dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
{
	siginfo_t info;
	local_irq_enable();

	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_BADSTK;
	info.si_addr = NULL;
	if (notify_die(DIE_TRAP, "iret exception", regs, error_code,
			X86_TRAP_IRET, SIGILL) == NOTIFY_STOP)
		return;
	do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code,
		&info);
}
#endif
/* Set of traps needed for early debugging. */
void __init early_trap_init(void)
{
	set_intr_gate_ist(X86_TRAP_DB, &debug, DEBUG_STACK);
	/* int3 can be called from all */
	set_system_intr_gate_ist(X86_TRAP_BP, &int3, DEBUG_STACK);
	set_intr_gate(X86_TRAP_PF, &page_fault);
	load_idt(&idt_descr);
}
void __init trap_init(void)
{
	int i;

#ifdef CONFIG_EISA
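	/* Detect an EISA machine by looking for the "EISA" signature in the BIOS area. */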
	void __iomem *p = early_ioremap(0x0FFFD9, 4);

	if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24))
		EISA_bus = 1;
	early_iounmap(p, 4);
#endif

	set_intr_gate(X86_TRAP_DE, &divide_error);
	set_intr_gate_ist(X86_TRAP_NMI, &nmi, NMI_STACK);
	/* int4 can be called from all */
	set_system_intr_gate(X86_TRAP_OF, &overflow);
	set_intr_gate(X86_TRAP_BR, &bounds);
	set_intr_gate(X86_TRAP_UD, &invalid_op);
	set_intr_gate(X86_TRAP_NM, &device_not_available);
#ifdef CONFIG_X86_32
	set_task_gate(X86_TRAP_DF, GDT_ENTRY_DOUBLEFAULT_TSS);
#else
	set_intr_gate_ist(X86_TRAP_DF, &double_fault, DOUBLEFAULT_STACK);
#endif
	set_intr_gate(X86_TRAP_OLD_MF, &coprocessor_segment_overrun);
	set_intr_gate(X86_TRAP_TS, &invalid_TSS);
	set_intr_gate(X86_TRAP_NP, &segment_not_present);
	set_intr_gate_ist(X86_TRAP_SS, &stack_segment, STACKFAULT_STACK);
	set_intr_gate(X86_TRAP_GP, &general_protection);
	set_intr_gate(X86_TRAP_SPURIOUS, &spurious_interrupt_bug);
	set_intr_gate(X86_TRAP_MF, &coprocessor_error);
	set_intr_gate(X86_TRAP_AC, &alignment_check);
#ifdef CONFIG_X86_MCE
	set_intr_gate_ist(X86_TRAP_MC, &machine_check, MCE_STACK);
#endif
	set_intr_gate(X86_TRAP_XF, &simd_coprocessor_error);

	/* Reserve all the builtin and the syscall vector: */
	for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
		set_bit(i, used_vectors);

#ifdef CONFIG_IA32_EMULATION
	set_system_intr_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
	set_bit(IA32_SYSCALL_VECTOR, used_vectors);
#endif

#ifdef CONFIG_X86_32
	set_system_trap_gate(SYSCALL_VECTOR, &system_call);
	set_bit(SYSCALL_VECTOR, used_vectors);
#endif

	/*
	 * Should be a barrier for any external CPU state:
	 */
	cpu_init();

	x86_init.irqs.trap_init();
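
	/*
	 * Keep a shadow IDT for use while handling an NMI; its #DB and
	 * #BP gates (installed via set_nmi_gate) avoid the IST debug
	 * stack, which the interrupted context may already be using.
	 */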
#ifdef CONFIG_X86_64
	memcpy(&nmi_idt_table, &idt_table, IDT_ENTRIES * 16);
	set_nmi_gate(X86_TRAP_DB, &debug);
	set_nmi_gate(X86_TRAP_BP, &int3);
#endif
}