/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/io.h>

#ifdef CONFIG_EISA
#include <linux/ioport.h>
#include <linux/eisa.h>
#endif

#ifdef CONFIG_MCA
#include <linux/mca.h>
#endif

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

#include <asm/kmemcheck.h>
#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/mce.h>

#include <asm/mach_traps.h>

#ifdef CONFIG_X86_64
#include <asm/x86_init.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>
#else
#include <asm/processor-flags.h>
#include <asm/setup.h>

asmlinkage int system_call(void);

/* Do we ignore FPU interrupts? */
char ignore_fpu_irq;

/*
 * The IDT has to be page-aligned to simplify the Pentium
 * F0 0F bug workaround.. We have a special link segment
 * for this.
 */
gate_desc idt_table[NR_VECTORS]
        __attribute__((__section__(".data.idt"))) = { { { { 0, 0 } } }, };
#endif

DECLARE_BITMAP(used_vectors, NR_VECTORS);
EXPORT_SYMBOL_GPL(used_vectors);

static int ignore_nmis;
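
/*
 * conditional_sti()/conditional_cli() only touch the interrupt flag
 * when the interrupted context had interrupts enabled (its saved
 * EFLAGS.IF is set), so a trap taken with interrupts off never turns
 * them back on.  The preempt_conditional_*() pair additionally
 * brackets that with inc/dec_preempt_count(); the idea is that a
 * handler running on an IST stack must not be scheduled away while
 * interrupts are enabled, since its per-CPU IST stack could be
 * clobbered on re-entry.
 */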
static inline void conditional_sti(struct pt_regs *regs)
{
        if (regs->flags & X86_EFLAGS_IF)
                local_irq_enable();
}

static inline void preempt_conditional_sti(struct pt_regs *regs)
{
        inc_preempt_count();
        if (regs->flags & X86_EFLAGS_IF)
                local_irq_enable();
}

static inline void conditional_cli(struct pt_regs *regs)
{
        if (regs->flags & X86_EFLAGS_IF)
                local_irq_disable();
}

static inline void preempt_conditional_cli(struct pt_regs *regs)
{
        if (regs->flags & X86_EFLAGS_IF)
                local_irq_disable();
        dec_preempt_count();
}

#ifdef CONFIG_X86_32
static inline void
die_if_kernel(const char *str, struct pt_regs *regs, long err)
{
        if (!user_mode_vm(regs))
                die(str, regs, err);
}
#endif

static void __kprobes
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
        long error_code, siginfo_t *info)
{
        struct task_struct *tsk = current;

#ifdef CONFIG_X86_32
        if (regs->flags & X86_VM_MASK) {
                /*
                 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
                 * On nmi (interrupt 2), do_trap should not be called.
                 */
                if (trapnr < 6)
                        goto vm86_trap;
                goto trap_signal;
        }
#endif

        if (!user_mode(regs))
                goto kernel_trap;

#ifdef CONFIG_X86_32
trap_signal:
#endif
        /*
         * We want error_code and trap_no set for userspace faults and
         * kernelspace faults which result in die(), but not
         * kernelspace faults which are fixed up.  die() gives the
         * process no chance to handle the signal and notice the
         * kernel fault information, so that won't result in polluting
         * the information about previously queued, but not yet
         * delivered, faults.  See also do_general_protection below.
         */
        tsk->thread.error_code = error_code;
        tsk->thread.trap_no = trapnr;

#ifdef CONFIG_X86_64
        if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
            printk_ratelimit()) {
                printk(KERN_INFO
                       "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
                       tsk->comm, tsk->pid, str,
                       regs->ip, regs->sp, error_code);
                print_vma_addr(" in ", regs->ip);
                printk("\n");
        }
#endif

        if (info)
                force_sig_info(signr, info, tsk);
        else
                force_sig(signr, tsk);
        return;

kernel_trap:
        if (!fixup_exception(regs)) {
                tsk->thread.error_code = error_code;
                tsk->thread.trap_no = trapnr;
                die(str, regs, error_code);
        }
        return;

#ifdef CONFIG_X86_32
vm86_trap:
        if (handle_vm86_trap((struct kernel_vm86_regs *) regs,
                             error_code, trapnr))
                goto trap_signal;
        return;
#endif
}

#define DO_ERROR(trapnr, signr, str, name)                              \
dotraplinkage void do_##name(struct pt_regs *regs, long error_code)     \
{                                                                       \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr)  \
                                                        == NOTIFY_STOP) \
                return;                                                 \
        conditional_sti(regs);                                          \
        do_trap(trapnr, signr, str, regs, error_code, NULL);            \
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr)         \
dotraplinkage void do_##name(struct pt_regs *regs, long error_code)     \
{                                                                       \
        siginfo_t info;                                                 \
        info.si_signo = signr;                                          \
        info.si_errno = 0;                                              \
        info.si_code = sicode;                                          \
        info.si_addr = (void __user *)siaddr;                           \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr)  \
                                                        == NOTIFY_STOP) \
                return;                                                 \
        conditional_sti(regs);                                          \
        do_trap(trapnr, signr, str, regs, error_code, &info);           \
}
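
/*
 * For illustration, DO_ERROR(4, SIGSEGV, "overflow", overflow) below
 * expands (roughly) to:
 *
 *      dotraplinkage void do_overflow(struct pt_regs *regs, long error_code)
 *      {
 *              if (notify_die(DIE_TRAP, "overflow", regs, error_code,
 *                             4, SIGSEGV) == NOTIFY_STOP)
 *                      return;
 *              conditional_sti(regs);
 *              do_trap(4, SIGSEGV, "overflow", regs, error_code, NULL);
 *      }
 *
 * i.e. each entry just names the vector, the signal to raise and the
 * string reported to the die notifier chain.
 */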
DO_ERROR_INFO(0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip)
DO_ERROR(4, SIGSEGV, "overflow", overflow)
DO_ERROR(5, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO(6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip)
DO_ERROR(9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
#ifdef CONFIG_X86_32
DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
#endif
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)

#ifdef CONFIG_X86_64
/* Runs on IST stack */
dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code)
{
        if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
                       12, SIGBUS) == NOTIFY_STOP)
                return;
        preempt_conditional_sti(regs);
        do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL);
        preempt_conditional_cli(regs);
}

dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
{
        static const char str[] = "double fault";
        struct task_struct *tsk = current;

        /* Return value not checked because a double fault cannot be ignored */
        notify_die(DIE_TRAP, str, regs, error_code, 8, SIGSEGV);

        tsk->thread.error_code = error_code;
        tsk->thread.trap_no = 8;

        /*
         * This is always a kernel trap and never fixable (and thus must
         * never return).
         */
        for (;;)
                die(str, regs, error_code);
}
#endif

dotraplinkage void __kprobes
do_general_protection(struct pt_regs *regs, long error_code)
{
        struct task_struct *tsk;

        conditional_sti(regs);

#ifdef CONFIG_X86_32
        if (regs->flags & X86_VM_MASK)
                goto gp_in_vm86;
#endif

        tsk = current;
        if (!user_mode(regs))
                goto gp_in_kernel;

        tsk->thread.error_code = error_code;
        tsk->thread.trap_no = 13;

        if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
            printk_ratelimit()) {
                printk(KERN_INFO
                       "%s[%d] general protection ip:%lx sp:%lx error:%lx",
                       tsk->comm, task_pid_nr(tsk),
                       regs->ip, regs->sp, error_code);
                print_vma_addr(" in ", regs->ip);
                printk("\n");
        }

        force_sig(SIGSEGV, tsk);
        return;

#ifdef CONFIG_X86_32
gp_in_vm86:
        local_irq_enable();
        handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
        return;
#endif

gp_in_kernel:
        if (fixup_exception(regs))
                return;

        tsk->thread.error_code = error_code;
        tsk->thread.trap_no = 13;
        if (notify_die(DIE_GPF, "general protection fault", regs,
                       error_code, 13, SIGSEGV) == NOTIFY_STOP)
                return;
        die("general protection fault", regs, error_code);
}
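
/*
 * Note on the #GP error code: when the fault is segment-related the
 * hardware pushes the selector index that caused it (plus the table
 * indicator bits); for most other causes it pushes 0.  That is why
 * the handler above only logs the raw value instead of decoding it.
 */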

static notrace __kprobes void
mem_parity_error(unsigned char reason, struct pt_regs *regs)
{
        printk(KERN_EMERG
                "Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
                        reason, smp_processor_id());

        printk(KERN_EMERG
                "You have some hardware problem, likely on the PCI bus.\n");

#if defined(CONFIG_EDAC)
        if (edac_handler_set()) {
                edac_atomic_assert_error();
                return;
        }
#endif

        if (panic_on_unrecovered_nmi)
                panic("NMI: Not continuing");

        printk(KERN_EMERG "Dazed and confused, but trying to continue\n");

        /* Clear and disable the memory parity error line. */
        reason = (reason & 0xf) | 4;
        outb(reason, 0x61);
}

static notrace __kprobes void
io_check_error(unsigned char reason, struct pt_regs *regs)
{
        unsigned long i;

        printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
        show_registers(regs);

        if (panic_on_io_nmi)
                panic("NMI IOCK error: Not continuing");

        /* Re-enable the IOCK line, wait for a few seconds */
        reason = (reason & 0xf) | 8;
        outb(reason, 0x61);

        i = 2000;
        while (--i)
                udelay(1000);

        reason &= ~8;
        outb(reason, 0x61);
}

static notrace __kprobes void
unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
{
        if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) ==
                        NOTIFY_STOP)
                return;
#ifdef CONFIG_MCA
        /*
         * Might actually be able to figure out what the guilty party
         * is:
         */
        if (MCA_bus) {
                mca_handle_nmi();
                return;
        }
#endif
        printk(KERN_EMERG
                "Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
                        reason, smp_processor_id());

        printk(KERN_EMERG "Do you have a strange power saving mode enabled?\n");
        if (panic_on_unrecovered_nmi)
                panic("NMI: Not continuing");

        printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
}
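
/*
 * Layout of the NMI status/control port (0x61) as used here:
 *
 *      read:   bit 7   SERR# asserted (memory/PCI parity error)
 *              bit 6   IOCHK# asserted (I/O channel check)
 *      write:  bit 3   1 = disable/clear IOCHK# reporting
 *              bit 2   1 = disable/clear SERR# (parity) reporting
 *
 * Hence the (reason & 0xc0) test below, and the (reason & 0xf) | 4
 * and | 8 writes above, which clear a latched error by disabling and
 * (for IOCK) later re-enabling the line.
 */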
static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
{
        unsigned char reason = 0;
        int cpu;

        cpu = smp_processor_id();

        /* Only the BSP gets external NMIs from the system. */
        if (!cpu)
                reason = get_nmi_reason();

        if (!(reason & 0xc0)) {
                if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
                                                                == NOTIFY_STOP)
                        return;
#ifdef CONFIG_X86_LOCAL_APIC
                /*
                 * Ok, so this is none of the documented NMI sources,
                 * so it must be the NMI watchdog.
                 */
                if (nmi_watchdog_tick(regs, reason))
                        return;
                if (!do_nmi_callback(regs, cpu))
                        unknown_nmi_error(reason, regs);
#else
                unknown_nmi_error(reason, regs);
#endif

                return;
        }
        if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
                return;

        /* AK: following checks seem to be broken on modern chipsets. FIXME */
        if (reason & 0x80)
                mem_parity_error(reason, regs);
        if (reason & 0x40)
                io_check_error(reason, regs);
#ifdef CONFIG_X86_32
        /*
         * Reassert NMI in case it became active meanwhile
         * as it's edge-triggered:
         */
        reassert_nmi();
#endif
}

dotraplinkage notrace __kprobes void
do_nmi(struct pt_regs *regs, long error_code)
{
        nmi_enter();

        inc_irq_stat(__nmi_count);

        if (!ignore_nmis)
                default_do_nmi(regs);

        nmi_exit();
}

void stop_nmi(void)
{
        acpi_nmi_disable();
        ignore_nmis++;
}

void restart_nmi(void)
{
        ignore_nmis--;
        acpi_nmi_enable();
}

/* May run on IST stack. */
dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_KPROBES
        if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
                        == NOTIFY_STOP)
                return;
#else
        if (notify_die(DIE_TRAP, "int3", regs, error_code, 3, SIGTRAP)
                        == NOTIFY_STOP)
                return;
#endif

        preempt_conditional_sti(regs);
        do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
        preempt_conditional_cli(regs);
}
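
/*
 * int3 is the single-byte breakpoint instruction (opcode 0xcc), which
 * is what kprobes and debuggers plant in running code; that is why a
 * kprobes kernel reports DIE_INT3 first, giving the kprobe notifier a
 * chance to claim the trap before it becomes an ordinary SIGTRAP.
 * Its IDT entry is installed with set_system_intr_gate_ist() in
 * trap_init() so that userspace (DPL 3) may issue it directly.
 */
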
#ifdef CONFIG_X86_64
/*
 * Help handler running on IST stack to switch back to user stack
 * for scheduling or signal handling. The actual stack switch is done in
 * entry.S
 */
asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
{
        struct pt_regs *regs = eregs;
        /* Did already sync */
        if (eregs == (struct pt_regs *)eregs->sp)
                ;
        /* Exception from user space */
        else if (user_mode(eregs))
                regs = task_pt_regs(current);
        /*
         * Exception from kernel and interrupts are enabled. Move to
         * kernel process stack.
         */
        else if (eregs->flags & X86_EFLAGS_IF)
                regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs));
        if (eregs != regs)
                *regs = *eregs;
        return regs;
}
#endif

/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 *
 * May run on IST stack.
 */
dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
{
        struct task_struct *tsk = current;
        unsigned long condition;
        int si_code;

        get_debugreg(condition, 6);

        /* Catch kmemcheck conditions first of all! */
        if (condition & DR_STEP && kmemcheck_trap(regs))
                return;

        /*
         * The processor cleared BTF, so don't mark that we need it set.
         */
        clear_tsk_thread_flag(tsk, TIF_DEBUGCTLMSR);
        tsk->thread.debugctlmsr = 0;

        if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
                                                SIGTRAP) == NOTIFY_STOP)
                return;

        /* It's safe to allow irq's after DR6 has been saved */
        preempt_conditional_sti(regs);

        /* Mask out spurious debug traps due to lazy DR7 setting */
        if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
                if (!tsk->thread.debugreg7)
                        goto clear_dr7;
        }

#ifdef CONFIG_X86_32
        if (regs->flags & X86_VM_MASK)
                goto debug_vm86;
#endif

        /* Save debug status register where ptrace can see it */
        tsk->thread.debugreg6 = condition;

        /*
         * Single-stepping through TF: make sure we ignore any events in
         * kernel space (but re-enable TF when returning to user mode).
         */
        if (condition & DR_STEP) {
                if (!user_mode(regs))
                        goto clear_TF_reenable;
        }

        si_code = get_si_code(condition);
        /* Ok, finally something we can handle */
        send_sigtrap(tsk, regs, error_code, si_code);

        /*
         * Disable additional traps. They'll be re-enabled when
         * the signal is delivered.
         */
clear_dr7:
        set_debugreg(0, 7);
        preempt_conditional_cli(regs);
        return;

#ifdef CONFIG_X86_32
debug_vm86:
        /* reenable preemption: handle_vm86_trap() might sleep */
        dec_preempt_count();
        handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
        conditional_cli(regs);
        return;
#endif

clear_TF_reenable:
        set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
        regs->flags &= ~X86_EFLAGS_TF;
        preempt_conditional_cli(regs);
        return;
}
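
/*
 * DR6 bits consumed above: DR_TRAP0..DR_TRAP3 (bits 0-3) report which
 * of the four hardware breakpoints in DR0-DR3 fired, and DR_STEP
 * (bit 14) reports a single-step trap from EFLAGS.TF.  DR7 is the
 * control register that arms the breakpoints, which is why zeroing it
 * at clear_dr7 suppresses further traps until signal delivery
 * re-enables them.
 */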

#ifdef CONFIG_X86_64
static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr)
{
        if (fixup_exception(regs))
                return 1;

        notify_die(DIE_GPF, str, regs, 0, trapnr, SIGFPE);
        /* Illegal floating point operation in the kernel */
        current->thread.trap_no = trapnr;
        die(str, regs, 0);
        return 0;
}
#endif

/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
void math_error(void __user *ip)
{
        struct task_struct *task;
        siginfo_t info;
        unsigned short cwd, swd, err;

        /*
         * Save the info for the exception handler and clear the error.
         */
        task = current;
        save_init_fpu(task);
        task->thread.trap_no = 16;
        task->thread.error_code = 0;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_addr = ip;

        /*
         * (~cwd & swd) will mask out exceptions that are not set to unmasked
         * status.  0x3f is the exception bits in these regs, 0x200 is the
         * C1 reg you need in case of a stack fault, 0x040 is the stack
         * fault bit.  We should only be taking one exception at a time,
         * so if this combination doesn't produce any single exception,
         * then we have a bad program that isn't synchronizing its FPU usage
         * and it will suffer the consequences since we won't be able to
         * fully reproduce the context of the exception
         */
        cwd = get_fpu_cwd(task);
        swd = get_fpu_swd(task);

        err = swd & ~cwd;

        if (err & 0x001) {      /* Invalid op */
                /*
                 * swd & 0x240 == 0x040: Stack Underflow
                 * swd & 0x240 == 0x240: Stack Overflow
                 * User must clear the SF bit (0x40) if set
                 */
                info.si_code = FPE_FLTINV;
        } else if (err & 0x004) { /* Divide by Zero */
                info.si_code = FPE_FLTDIV;
        } else if (err & 0x008) { /* Overflow */
                info.si_code = FPE_FLTOVF;
        } else if (err & 0x012) { /* Denormal, Underflow */
                info.si_code = FPE_FLTUND;
        } else if (err & 0x020) { /* Precision */
                info.si_code = FPE_FLTRES;
        } else {
                /*
                 * If we're using IRQ 13, or supposedly even some trap 16
                 * implementations, it's possible we get a spurious trap...
                 */
                return;         /* Spurious trap, no error */
        }
        force_sig_info(SIGFPE, &info, task);
}
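
/*
 * Worked example of the masking above: a program that divides by zero
 * with the ZM mask bit clear ends up with swd bit 2 (ZE) set and cwd
 * bit 2 (ZM) clear, so err = swd & ~cwd has 0x004 set and we raise
 * SIGFPE with si_code FPE_FLTDIV.  With ZM set, the FPU substitutes
 * infinity and no trap is raised, so we never get here at all.
 */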

dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
{
        conditional_sti(regs);

#ifdef CONFIG_X86_32
        ignore_fpu_irq = 1;
#else
        if (!user_mode(regs) &&
            kernel_math_error(regs, "kernel x87 math error", 16))
                return;
#endif

        math_error((void __user *)regs->ip);
}

static void simd_math_error(void __user *ip)
{
        struct task_struct *task;
        siginfo_t info;
        unsigned short mxcsr;

        /*
         * Save the info for the exception handler and clear the error.
         */
        task = current;
        save_init_fpu(task);
        task->thread.trap_no = 19;
        task->thread.error_code = 0;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_code = __SI_FAULT;
        info.si_addr = ip;
        /*
         * The SIMD FPU exceptions are handled a little differently, as there
         * is only a single status/control register. Thus, to determine which
         * unmasked exception was caught we must mask the exception mask bits
         * at 0x1f80, and then use these to mask the exception bits at 0x3f.
         */
        mxcsr = get_fpu_mxcsr(task);
        switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
        case 0x000:
        default:
                break;
        case 0x001: /* Invalid Op */
                info.si_code = FPE_FLTINV;
                break;
        case 0x002: /* Denormalize */
        case 0x010: /* Underflow */
                info.si_code = FPE_FLTUND;
                break;
        case 0x004: /* Zero Divide */
                info.si_code = FPE_FLTDIV;
                break;
        case 0x008: /* Overflow */
                info.si_code = FPE_FLTOVF;
                break;
        case 0x020: /* Precision */
                info.si_code = FPE_FLTRES;
                break;
        }
        force_sig_info(SIGFPE, &info, task);
}
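
/*
 * Worked example of the MXCSR decode above: take mxcsr == 0x1f01,
 * i.e. every mask bit set except IM (bit 7), with the IE flag (bit 0)
 * raised.  Then (mxcsr & 0x1f80) >> 7 == 0x3e, and ~0x3e &
 * (mxcsr & 0x3f) == ~0x3e & 0x01 == 0x001, so the switch selects
 * FPE_FLTINV.  Flags whose mask bit is still set are filtered out the
 * same way, since a masked SIMD exception never traps to begin with.
 */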

dotraplinkage void
do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
{
        conditional_sti(regs);

#ifdef CONFIG_X86_32
        if (cpu_has_xmm) {
                /* Handle SIMD FPU exceptions on PIII+ processors. */
                ignore_fpu_irq = 1;
                simd_math_error((void __user *)regs->ip);
                return;
        }
        /*
         * Handle strange cache flush from user space exception
         * in all other cases.  This is undocumented behaviour.
         */
        if (regs->flags & X86_VM_MASK) {
                handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
                return;
        }
        current->thread.trap_no = 19;
        current->thread.error_code = error_code;
        die_if_kernel("cache flush denied", regs, error_code);
        force_sig(SIGSEGV, current);
#else
        if (!user_mode(regs) &&
            kernel_math_error(regs, "kernel simd math error", 19))
                return;
        simd_math_error((void __user *)regs->ip);
#endif
}

dotraplinkage void
do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
{
        conditional_sti(regs);
#if 0
        /* No need to warn about this any longer. */
        printk(KERN_INFO "Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
#endif
}

asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
{
}

asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
{
}
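
/*
 * How the lazy FPU switching below hangs together: on a context
 * switch away from an FPU-using task the kernel sets CR0.TS instead
 * of eagerly restoring the new task's FPU state.  The next FPU
 * instruction then raises #NM (vector 7), which lands in
 * do_device_not_available() and ends up in math_state_restore(),
 * which clears TS with clts() and reloads the task's saved state.
 * A task that never touches the FPU thus never pays for a restore.
 */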

/*
 * __math_state_restore assumes that cr0.TS is already clear and the
 * fpu state is all ready for use.  Used during context switch.
 */
void __math_state_restore(void)
{
        struct thread_info *thread = current_thread_info();
        struct task_struct *tsk = thread->task;

        /*
         * Paranoid restore. send a SIGSEGV if we fail to restore the state.
         */
        if (unlikely(restore_fpu_checking(tsk))) {
                stts();
                force_sig(SIGSEGV, tsk);
                return;
        }

        thread->status |= TS_USEDFPU;   /* So we fnsave on switch_to() */
        tsk->fpu_counter++;
}

/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 *
 * Must be called with kernel preemption disabled (in this case,
 * local interrupts are disabled at the call-site in entry.S).
 */
asmlinkage void math_state_restore(void)
{
        struct thread_info *thread = current_thread_info();
        struct task_struct *tsk = thread->task;

        if (!tsk_used_math(tsk)) {
                local_irq_enable();
                /*
                 * does a slab alloc which can sleep
                 */
                if (init_fpu(tsk)) {
                        /*
                         * ran out of memory!
                         */
                        do_group_exit(SIGKILL);
                        return;
                }
                local_irq_disable();
        }

        clts();                         /* Allow maths ops (or we recurse) */

        __math_state_restore();
}
EXPORT_SYMBOL_GPL(math_state_restore);

#ifndef CONFIG_MATH_EMULATION
void math_emulate(struct math_emu_info *info)
{
        printk(KERN_EMERG
                "math-emulation not enabled and no coprocessor found.\n");
        printk(KERN_EMERG "killing %s.\n", current->comm);
        force_sig(SIGFPE, current);
        schedule();
}
#endif /* CONFIG_MATH_EMULATION */

dotraplinkage void __kprobes
do_device_not_available(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_X86_32
        if (read_cr0() & X86_CR0_EM) {
                struct math_emu_info info = { };

                conditional_sti(regs);

                info.regs = regs;
                math_emulate(&info);
        } else {
                math_state_restore(); /* interrupts still off */
                conditional_sti(regs);
        }
#else
        math_state_restore();
#endif
}

#ifdef CONFIG_X86_32
dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
{
        siginfo_t info;

        local_irq_enable();

        info.si_signo = SIGILL;
        info.si_errno = 0;
        info.si_code = ILL_BADSTK;
        info.si_addr = NULL;
        if (notify_die(DIE_TRAP, "iret exception",
                        regs, error_code, 32, SIGILL) == NOTIFY_STOP)
                return;
        do_trap(32, SIGILL, "iret exception", regs, error_code, &info);
}
#endif
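
/*
 * The EISA probe in trap_init() below reads the four-byte BIOS
 * signature at physical 0x0FFFD9; 'E' + ('I'<<8) + ('S'<<16) +
 * ('A'<<24) assembles the string "EISA" in little-endian order, so
 * the readl() comparison is simply a string match against the ROM.
 */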
void __init trap_init(void)
{
        int i;

#ifdef CONFIG_EISA
        void __iomem *p = early_ioremap(0x0FFFD9, 4);

        if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24))
                EISA_bus = 1;
        early_iounmap(p, 4);
#endif

        set_intr_gate(0, &divide_error);
        set_intr_gate_ist(1, &debug, DEBUG_STACK);
        set_intr_gate_ist(2, &nmi, NMI_STACK);
        /* int3 can be called from all */
        set_system_intr_gate_ist(3, &int3, DEBUG_STACK);
        /* int4 can be called from all */
        set_system_intr_gate(4, &overflow);
        set_intr_gate(5, &bounds);
        set_intr_gate(6, &invalid_op);
        set_intr_gate(7, &device_not_available);
#ifdef CONFIG_X86_32
        set_task_gate(8, GDT_ENTRY_DOUBLEFAULT_TSS);
#else
        set_intr_gate_ist(8, &double_fault, DOUBLEFAULT_STACK);
#endif
        set_intr_gate(9, &coprocessor_segment_overrun);
        set_intr_gate(10, &invalid_TSS);
        set_intr_gate(11, &segment_not_present);
        set_intr_gate_ist(12, &stack_segment, STACKFAULT_STACK);
        set_intr_gate(13, &general_protection);
        set_intr_gate(14, &page_fault);
        set_intr_gate(15, &spurious_interrupt_bug);
        set_intr_gate(16, &coprocessor_error);
        set_intr_gate(17, &alignment_check);
#ifdef CONFIG_X86_MCE
        set_intr_gate_ist(18, &machine_check, MCE_STACK);
#endif
        set_intr_gate(19, &simd_coprocessor_error);

        /* Reserve all the builtin and the syscall vector: */
        for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
                set_bit(i, used_vectors);

#ifdef CONFIG_IA32_EMULATION
        set_system_intr_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
        set_bit(IA32_SYSCALL_VECTOR, used_vectors);
#endif

#ifdef CONFIG_X86_32
        if (cpu_has_fxsr) {
                printk(KERN_INFO "Enabling fast FPU save and restore... ");
                set_in_cr4(X86_CR4_OSFXSR);
                printk("done.\n");
        }
        if (cpu_has_xmm) {
                printk(KERN_INFO
                        "Enabling unmasked SIMD FPU exception support... ");
                set_in_cr4(X86_CR4_OSXMMEXCPT);
                printk("done.\n");
        }

        set_system_trap_gate(SYSCALL_VECTOR, &system_call);
        set_bit(SYSCALL_VECTOR, used_vectors);
#endif

        /*
         * Should be a barrier for any external CPU state:
         */
        cpu_init();

        x86_init.irqs.trap_init();
}