/*
 * linux/arch/i386/traps.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/utsname.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>

#ifdef CONFIG_EISA
#include <linux/ioport.h>
#include <linux/eisa.h>
#endif

#ifdef CONFIG_MCA
#include <linux/mca.h>
#endif

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/debugreg.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/nmi.h>

#include <asm/smp.h>
#include <asm/arch_hooks.h>
#include <asm/kdebug.h>

#include <linux/irq.h>
#include <linux/module.h>

#include "mach_traps.h"
asmlinkage int system_call(void);

struct desc_struct default_ldt[] = { { 0, 0 }, { 0, 0 }, { 0, 0 },
                { 0, 0 }, { 0, 0 } };

/* Do we ignore FPU interrupts? */
char ignore_fpu_irq = 0;

/*
 * The IDT has to be page-aligned to simplify the Pentium
 * F0 0F bug workaround. We have a special link segment
 * for this.
 */
struct desc_struct idt_table[256] __attribute__((__section__(".data.idt"))) = { {0, 0}, };
asmlinkage void divide_error(void);
asmlinkage void debug(void);
asmlinkage void nmi(void);
asmlinkage void int3(void);
asmlinkage void overflow(void);
asmlinkage void bounds(void);
asmlinkage void invalid_op(void);
asmlinkage void device_not_available(void);
asmlinkage void coprocessor_segment_overrun(void);
asmlinkage void invalid_TSS(void);
asmlinkage void segment_not_present(void);
asmlinkage void stack_segment(void);
asmlinkage void general_protection(void);
asmlinkage void page_fault(void);
asmlinkage void coprocessor_error(void);
asmlinkage void simd_coprocessor_error(void);
asmlinkage void alignment_check(void);
asmlinkage void spurious_interrupt_bug(void);
asmlinkage void machine_check(void);

static int kstack_depth_to_print = 24;
struct notifier_block *i386die_chain;
static DEFINE_SPINLOCK(die_notifier_lock);

int register_die_notifier(struct notifier_block *nb)
{
        int err = 0;
        unsigned long flags;

        spin_lock_irqsave(&die_notifier_lock, flags);
        err = notifier_chain_register(&i386die_chain, nb);
        spin_unlock_irqrestore(&die_notifier_lock, flags);
        return err;
}
EXPORT_SYMBOL(register_die_notifier);
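
/*
 * A stack pointer is accepted by valid_stack_ptr() only if it lies inside
 * the current task's kernel stack: above the thread_info at the base of
 * the stack, and low enough that a whole 4-byte word can still be read
 * below the top (hence the "- 3" on the upper bound).
 */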
static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
{
        return  p > (void *)tinfo &&
                p < (void *)tinfo + THREAD_SIZE - 3;
}

static inline unsigned long print_context_stack(struct thread_info *tinfo,
                                unsigned long *stack, unsigned long ebp)
{
        unsigned long addr;

#ifdef CONFIG_FRAME_POINTER
        while (valid_stack_ptr(tinfo, (void *)ebp)) {
                addr = *(unsigned long *)(ebp + 4);
                printk(" [<%08lx>] ", addr);
                print_symbol("%s", addr);
                printk("\n");
                ebp = *(unsigned long *)ebp;
        }
#else
        while (valid_stack_ptr(tinfo, stack)) {
                addr = *stack++;
                if (__kernel_text_address(addr)) {
                        printk(" [<%08lx>]", addr);
                        print_symbol(" %s", addr);
                        printk("\n");
                }
        }
#endif
        return ebp;
}
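
/*
 * show_trace() unwinds the given stack, then follows
 * thread_info->previous_esp into any stack it was entered from (e.g. a
 * separate interrupt stack), printing a divider between the two, until
 * no earlier stack is left.
 */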
void show_trace(struct task_struct *task, unsigned long * stack)
{
        unsigned long ebp;

        if (!task)
                task = current;

        if (task == current) {
                /* Grab ebp right from our regs */
                asm ("movl %%ebp, %0" : "=r" (ebp) : );
        } else {
                /* ebp is the last reg pushed by switch_to */
                ebp = *(unsigned long *) task->thread.esp;
        }

        while (1) {
                struct thread_info *context;
                context = (struct thread_info *)
                        ((unsigned long)stack & (~(THREAD_SIZE - 1)));
                ebp = print_context_stack(context, stack, ebp);
                stack = (unsigned long*)context->previous_esp;
                if (!stack)
                        break;
                printk(" =======================\n");
        }
}
void show_stack(struct task_struct *task, unsigned long *esp)
{
        unsigned long *stack;
        int i;

        if (esp == NULL) {
                if (task)
                        esp = (unsigned long*)task->thread.esp;
                else
                        esp = (unsigned long *)&esp;
        }

        stack = esp;
        for (i = 0; i < kstack_depth_to_print; i++) {
                if (kstack_end(stack))
                        break;
                if (i && ((i % 8) == 0))
                        printk("\n ");
                printk("%08lx ", *stack++);
        }
        printk("\nCall Trace:\n");
        show_trace(task, esp);
}
/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
        unsigned long stack;

        show_trace(current, &stack);
}
EXPORT_SYMBOL(dump_stack);
void show_registers(struct pt_regs *regs)
{
        int i;
        int in_kernel = 1;
        unsigned long esp;
        unsigned short ss;

        esp = (unsigned long) (&regs->esp);
        ss = __KERNEL_DS;
        if (user_mode(regs)) {
                in_kernel = 0;
                esp = regs->esp;
                ss = regs->xss & 0xffff;
        }
        print_modules();
        printk("CPU: %d\nEIP: %04x:[<%08lx>] %s VLI\nEFLAGS: %08lx"
                " (%s) \n",
                smp_processor_id(), 0xffff & regs->xcs, regs->eip,
                print_tainted(), regs->eflags, system_utsname.release);
        print_symbol("EIP is at %s\n", regs->eip);
        printk("eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n",
                regs->eax, regs->ebx, regs->ecx, regs->edx);
        printk("esi: %08lx edi: %08lx ebp: %08lx esp: %08lx\n",
                regs->esi, regs->edi, regs->ebp, esp);
        printk("ds: %04x es: %04x ss: %04x\n",
                regs->xds & 0xffff, regs->xes & 0xffff, ss);
        printk("Process %s (pid: %d, threadinfo=%p task=%p)",
                current->comm, current->pid, current_thread_info(), current);
        /*
         * When in-kernel, we also print out the stack and code at the
         * time of the fault.
         */
        if (in_kernel) {
                u8 __user *eip;

                printk("\nStack: ");
                show_stack(NULL, (unsigned long*)esp);

                printk("Code: ");

                eip = (u8 __user *)regs->eip - 43;
                for (i = 0; i < 64; i++, eip++) {
                        unsigned char c;

                        if (eip < (u8 __user *)PAGE_OFFSET || __get_user(c, eip)) {
                                printk(" Bad EIP value.");
                                break;
                        }
                        if (eip == (u8 __user *)regs->eip)
                                printk("<%02x> ", c);
                        else
                                printk("%02x ", c);
                }
        }
        printk("\n");
}
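
/*
 * BUG() on i386 compiles to a ud2 instruction (opcode bytes 0x0f 0x0b,
 * which read back as the little-endian short 0x0b0f below), followed in
 * verbose kernels by the source line number (2 bytes) and a pointer to
 * the file name (4 bytes). handle_BUG() decodes that record to print
 * the "kernel BUG at file:line!" banner.
 */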
static void handle_BUG(struct pt_regs *regs)
{
        unsigned short ud2;
        unsigned short line;
        char *file;
        char c;
        unsigned long eip;

        if (user_mode(regs))
                goto no_bug;            /* Not in kernel */

        eip = regs->eip;

        if (eip < PAGE_OFFSET)
                goto no_bug;
        if (__get_user(ud2, (unsigned short __user *)eip))
                goto no_bug;
        if (ud2 != 0x0b0f)
                goto no_bug;
        if (__get_user(line, (unsigned short __user *)(eip + 2)))
                goto bug;
        if (__get_user(file, (char * __user *)(eip + 4)) ||
                (unsigned long)file < PAGE_OFFSET || __get_user(c, file))
                file = "<bad filename>";

        printk("------------[ cut here ]------------\n");
        printk(KERN_ALERT "kernel BUG at %s:%d!\n", file, line);

no_bug:
        return;

        /* Here we know it was a BUG but file-n-line is unavailable */
bug:
        printk("Kernel BUG\n");
}
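
/*
 * Oops output is serialized: die() takes a lock that remembers its owner
 * CPU and a recursion depth, so an oops raised while printing another
 * (or a concurrent oops on a second CPU) can neither interleave its
 * output nor recurse without bound.
 */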
/* This is gone through when something in the kernel
 * has done something bad and is about to be terminated.
 */
void die(const char * str, struct pt_regs * regs, long err)
{
        static struct {
                spinlock_t lock;
                u32 lock_owner;
                int lock_owner_depth;
        } die = {
                .lock             = SPIN_LOCK_UNLOCKED,
                .lock_owner       = -1,
                .lock_owner_depth = 0
        };
        static int die_counter;

        if (die.lock_owner != raw_smp_processor_id()) {
                console_verbose();
                spin_lock_irq(&die.lock);
                die.lock_owner = smp_processor_id();
                die.lock_owner_depth = 0;
                bust_spinlocks(1);
        }

        if (++die.lock_owner_depth < 3) {
                int nl = 0;
                handle_BUG(regs);
                printk(KERN_ALERT "%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
                printk("PREEMPT ");
                nl = 1;
#endif
#ifdef CONFIG_SMP
                printk("SMP ");
                nl = 1;
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
                printk("DEBUG_PAGEALLOC");
                nl = 1;
#endif
                if (nl)
                        printk("\n");
                notify_die(DIE_OOPS, (char *)str, regs, err, 255, SIGSEGV);
                show_registers(regs);
        } else
                printk(KERN_ERR "Recursive die() failure, output suppressed\n");

        bust_spinlocks(0);
        die.lock_owner = -1;
        spin_unlock_irq(&die.lock);

        if (kexec_should_crash(current))
                crash_kexec(regs);

        if (in_interrupt())
                panic("Fatal exception in interrupt");

        if (panic_on_oops) {
                printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
                ssleep(5);
                panic("Fatal exception");
        }
        do_exit(SIGSEGV);
}
static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
{
        if (!user_mode_vm(regs))
                die(str, regs, err);
}

static void do_trap(int trapnr, int signr, char *str, int vm86,
                    struct pt_regs * regs, long error_code, siginfo_t *info)
{
        struct task_struct *tsk = current;
        tsk->thread.error_code = error_code;
        tsk->thread.trap_no = trapnr;

        if (regs->eflags & VM_MASK) {
                if (vm86)
                        goto vm86_trap;
                goto trap_signal;
        }

        if (!user_mode(regs))
                goto kernel_trap;

trap_signal: {
                if (info)
                        force_sig_info(signr, info, tsk);
                else
                        force_sig(signr, tsk);
                return;
        }

kernel_trap: {
                if (!fixup_exception(regs))
                        die(str, regs, error_code);
                return;
        }

vm86_trap: {
                int ret = handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, trapnr);
                if (ret)
                        goto trap_signal;
                return;
        }
}
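
/*
 * The DO_ERROR* macros below stamp out one handler per exception vector.
 * Each handler runs the registered die-chain notifiers first and bails
 * out if one returns NOTIFY_STOP; otherwise it funnels into do_trap().
 * The _INFO variants fill in a siginfo_t with a fault code and address,
 * and the _VM86 variants let vm86 mode handle the trap itself.
 */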
#define DO_ERROR(trapnr, signr, str, name) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
                                                == NOTIFY_STOP) \
                return; \
        do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
        siginfo_t info; \
        info.si_signo = signr; \
        info.si_errno = 0; \
        info.si_code = sicode; \
        info.si_addr = (void __user *)siaddr; \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
                                                == NOTIFY_STOP) \
                return; \
        do_trap(trapnr, signr, str, 0, regs, error_code, &info); \
}

#define DO_VM86_ERROR(trapnr, signr, str, name) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
                                                == NOTIFY_STOP) \
                return; \
        do_trap(trapnr, signr, str, 1, regs, error_code, NULL); \
}

#define DO_VM86_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
        siginfo_t info; \
        info.si_signo = signr; \
        info.si_errno = 0; \
        info.si_code = sicode; \
        info.si_addr = (void __user *)siaddr; \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
                                                == NOTIFY_STOP) \
                return; \
        do_trap(trapnr, signr, str, 1, regs, error_code, &info); \
}
DO_VM86_ERROR_INFO( 0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->eip)
#ifndef CONFIG_KPROBES
DO_VM86_ERROR( 3, SIGTRAP, "int3", int3)
#endif
DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)
DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO( 6, SIGILL, "invalid operand", invalid_op, ILL_ILLOPN, regs->eip)
DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0)
fastcall void do_general_protection(struct pt_regs * regs, long error_code)
{
        int cpu = get_cpu();
        struct tss_struct *tss = &per_cpu(init_tss, cpu);
        struct thread_struct *thread = &current->thread;

        /*
         * Perform the lazy TSS's I/O bitmap copy. If the TSS has an
         * invalid offset set (the LAZY one) and the faulting thread has
         * a valid I/O bitmap pointer, we copy the I/O bitmap in the TSS
         * and we set the offset field correctly. Then we let the CPU
         * restart the faulting instruction.
         */
        if (tss->io_bitmap_base == INVALID_IO_BITMAP_OFFSET_LAZY &&
            thread->io_bitmap_ptr) {
                memcpy(tss->io_bitmap, thread->io_bitmap_ptr,
                       thread->io_bitmap_max);
                /*
                 * If the previously set map extended to higher ports
                 * than the current one, pad extra space with 0xff (no access).
                 */
                if (thread->io_bitmap_max < tss->io_bitmap_max)
                        memset((char *) tss->io_bitmap +
                                thread->io_bitmap_max, 0xff,
                                tss->io_bitmap_max - thread->io_bitmap_max);
                tss->io_bitmap_max = thread->io_bitmap_max;
                tss->io_bitmap_base = IO_BITMAP_OFFSET;
                put_cpu();
                return;
        }
        put_cpu();

        current->thread.error_code = error_code;
        current->thread.trap_no = 13;

        if (regs->eflags & VM_MASK)
                goto gp_in_vm86;

        if (!user_mode(regs))
                goto gp_in_kernel;

        force_sig(SIGSEGV, current);
        return;

gp_in_vm86:
        local_irq_enable();
        handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
        return;

gp_in_kernel:
        if (!fixup_exception(regs)) {
                if (notify_die(DIE_GPF, "general protection fault", regs,
                                error_code, 13, SIGSEGV) == NOTIFY_STOP)
                        return;
                die("general protection fault", regs, error_code);
        }
}
static void mem_parity_error(unsigned char reason, struct pt_regs * regs)
{
        printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n");
        printk("You probably have a hardware problem with your RAM chips\n");

        /* Clear and disable the memory parity error line. */
        clear_mem_error(reason);
}

static void io_check_error(unsigned char reason, struct pt_regs * regs)
{
        unsigned long i;

        printk("NMI: IOCK error (debug interrupt?)\n");
        show_registers(regs);

        /* Re-enable the IOCK line, wait for a few seconds */
        reason = (reason & 0xf) | 8;
        outb(reason, 0x61);
        i = 2000;
        while (--i)
                udelay(1000);
        reason &= ~8;
        outb(reason, 0x61);
}

static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
{
#ifdef CONFIG_MCA
        /* Might actually be able to figure out what the guilty party
         * is. */
        if (MCA_bus) {
                mca_handle_nmi();
                return;
        }
#endif
        printk("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
                reason, smp_processor_id());
        printk("Dazed and confused, but trying to continue\n");
        printk("Do you have a strange power saving mode enabled?\n");
}
static DEFINE_SPINLOCK(nmi_print_lock);

void die_nmi(struct pt_regs *regs, const char *msg)
{
        spin_lock(&nmi_print_lock);
        /*
         * We are in trouble anyway, let's at least try
         * to get a message out.
         */
        bust_spinlocks(1);
        printk(msg);
        printk(" on CPU%d, eip %08lx, registers:\n",
                smp_processor_id(), regs->eip);
        show_registers(regs);
        printk("console shuts up ...\n");
        console_silent();
        spin_unlock(&nmi_print_lock);
        bust_spinlocks(0);

        /* If we are in the kernel we are probably nested up pretty badly
         * and might as well get out now while we still can.
         */
        if (!user_mode(regs)) {
                current->thread.trap_no = 2;
                crash_kexec(regs);
        }

        do_exit(SIGSEGV);
}
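
/*
 * On PC-compatible hardware the NMI "reason" comes from system control
 * port B (I/O port 0x61): bit 7 reports a memory parity (or SERR) error
 * and bit 6 an I/O channel check, which is why the handler below tests
 * reason against 0x80 and 0x40.
 */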
static void default_do_nmi(struct pt_regs * regs)
{
        unsigned char reason = 0;

        /* Only the BSP gets external NMIs from the system. */
        if (!smp_processor_id())
                reason = get_nmi_reason();

        if (!(reason & 0xc0)) {
                if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 0, SIGINT)
                                                        == NOTIFY_STOP)
                        return;
#ifdef CONFIG_X86_LOCAL_APIC
                /*
                 * Ok, so this is none of the documented NMI sources,
                 * so it must be the NMI watchdog.
                 */
                if (nmi_watchdog) {
                        nmi_watchdog_tick(regs);
                        return;
                }
#endif
                unknown_nmi_error(reason, regs);
                return;
        }
        if (notify_die(DIE_NMI, "nmi", regs, reason, 0, SIGINT) == NOTIFY_STOP)
                return;
        if (reason & 0x80)
                mem_parity_error(reason, regs);
        if (reason & 0x40)
                io_check_error(reason, regs);
        /*
         * Reassert NMI in case it became active meanwhile
         * as it's edge-triggered.
         */
        reassert_nmi();
}
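
/*
 * An installed NMI callback (for example, a profiler's handler) gets the
 * first look at every NMI; a zero return means "not mine" and the NMI
 * falls through to default_do_nmi(). The dummy callback below simply
 * declines everything.
 */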
static int dummy_nmi_callback(struct pt_regs * regs, int cpu)
{
        return 0;
}

static nmi_callback_t nmi_callback = dummy_nmi_callback;

fastcall void do_nmi(struct pt_regs * regs, long error_code)
{
        int cpu;

        nmi_enter();

        cpu = smp_processor_id();

#ifdef CONFIG_HOTPLUG_CPU
        if (!cpu_online(cpu)) {
                nmi_exit();
                return;
        }
#endif

        ++nmi_count(cpu);

        if (!nmi_callback(regs, cpu))
                default_do_nmi(regs);

        nmi_exit();
}

void set_nmi_callback(nmi_callback_t callback)
{
        nmi_callback = callback;
}
EXPORT_SYMBOL_GPL(set_nmi_callback);

void unset_nmi_callback(void)
{
        nmi_callback = dummy_nmi_callback;
}
EXPORT_SYMBOL_GPL(unset_nmi_callback);
#ifdef CONFIG_KPROBES
fastcall void do_int3(struct pt_regs *regs, long error_code)
{
        if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
                        == NOTIFY_STOP)
                return;
        /* This is an interrupt gate, because kprobes wants interrupts
           disabled. Normal trap handlers don't. */
        restore_interrupts(regs);
        do_trap(3, SIGTRAP, "int3", 1, regs, error_code, NULL);
}
#endif
/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 */
fastcall void do_debug(struct pt_regs * regs, long error_code)
{
        unsigned int condition;
        struct task_struct *tsk = current;

        get_debugreg(condition, 6);

        if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
                                        SIGTRAP) == NOTIFY_STOP)
                return;
        /* It's safe to allow irq's after DR6 has been saved */
        if (regs->eflags & X86_EFLAGS_IF)
                local_irq_enable();

        /* Mask out spurious debug traps due to lazy DR7 setting */
        if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
                if (!tsk->thread.debugreg[7])
                        goto clear_dr7;
        }

        if (regs->eflags & VM_MASK)
                goto debug_vm86;

        /* Save debug status register where ptrace can see it */
        tsk->thread.debugreg[6] = condition;

        /*
         * Single-stepping through TF: make sure we ignore any events in
         * kernel space (but re-enable TF when returning to user mode).
         */
        if (condition & DR_STEP) {
                /*
                 * We already checked v86 mode above, so we can
                 * check for kernel mode by just checking the CPL
                 * of CS.
                 */
                if (!user_mode(regs))
                        goto clear_TF_reenable;
        }

        /* Ok, finally something we can handle */
        send_sigtrap(tsk, regs, error_code);

        /* Disable additional traps. They'll be re-enabled when
         * the signal is delivered.
         */
clear_dr7:
        set_debugreg(0, 7);
        return;

debug_vm86:
        handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
        return;

clear_TF_reenable:
        set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
        regs->eflags &= ~TF_MASK;
        return;
}
/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour.
 */
void math_error(void __user *eip)
{
        struct task_struct * task;
        siginfo_t info;
        unsigned short cwd, swd;

        /*
         * Save the info for the exception handler and clear the error.
         */
        task = current;
        save_init_fpu(task);
        task->thread.trap_no = 16;
        task->thread.error_code = 0;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_code = __SI_FAULT;
        info.si_addr = eip;
        /*
         * (~cwd & swd) will mask out exceptions that are not set to unmasked
         * status. 0x3f is the exception bits in these regs, 0x200 is the
         * C1 bit you need in case of a stack fault, 0x040 is the stack
         * fault bit. We should only be taking one exception at a time,
         * so if this combination doesn't produce any single exception,
         * then we have a bad program that isn't synchronizing its FPU usage
         * and it will suffer the consequences since we won't be able to
         * fully reproduce the context of the exception.
         */
        cwd = get_fpu_cwd(task);
        swd = get_fpu_swd(task);
        switch (((~cwd) & swd & 0x3f) | (swd & 0x240)) {
                case 0x000:
                default:
                        break;
                case 0x001: /* Invalid Op */
                case 0x041: /* Stack Fault */
                case 0x241: /* Stack Fault | Direction */
                        info.si_code = FPE_FLTINV;
                        /* Should we clear the SF or let user space do it ???? */
                        break;
                case 0x002: /* Denormalize */
                case 0x010: /* Underflow */
                        info.si_code = FPE_FLTUND;
                        break;
                case 0x004: /* Zero Divide */
                        info.si_code = FPE_FLTDIV;
                        break;
                case 0x008: /* Overflow */
                        info.si_code = FPE_FLTOVF;
                        break;
                case 0x020: /* Precision */
                        info.si_code = FPE_FLTRES;
                        break;
        }
        force_sig_info(SIGFPE, &info, task);
}

fastcall void do_coprocessor_error(struct pt_regs * regs, long error_code)
{
        ignore_fpu_irq = 1;
        math_error((void __user *)regs->eip);
}
static void simd_math_error(void __user *eip)
{
        struct task_struct * task;
        siginfo_t info;
        unsigned short mxcsr;

        /*
         * Save the info for the exception handler and clear the error.
         */
        task = current;
        save_init_fpu(task);
        task->thread.trap_no = 19;
        task->thread.error_code = 0;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_code = __SI_FAULT;
        info.si_addr = eip;
        /*
         * The SIMD FPU exceptions are handled a little differently, as there
         * is only a single status/control register. Thus, to determine which
         * unmasked exception was caught we must mask the exception mask bits
         * at 0x1f80, and then use these to mask the exception bits at 0x3f.
         */
        mxcsr = get_fpu_mxcsr(task);
        switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
                case 0x000:
                default:
                        break;
                case 0x001: /* Invalid Op */
                        info.si_code = FPE_FLTINV;
                        break;
                case 0x002: /* Denormalize */
                case 0x010: /* Underflow */
                        info.si_code = FPE_FLTUND;
                        break;
                case 0x004: /* Zero Divide */
                        info.si_code = FPE_FLTDIV;
                        break;
                case 0x008: /* Overflow */
                        info.si_code = FPE_FLTOVF;
                        break;
                case 0x020: /* Precision */
                        info.si_code = FPE_FLTRES;
                        break;
        }
        force_sig_info(SIGFPE, &info, task);
}

fastcall void do_simd_coprocessor_error(struct pt_regs * regs,
                                        long error_code)
{
        if (cpu_has_xmm) {
                /* Handle SIMD FPU exceptions on PIII+ processors. */
                ignore_fpu_irq = 1;
                simd_math_error((void __user *)regs->eip);
        } else {
                /*
                 * Handle strange cache flush from user space exception
                 * in all other cases. This is undocumented behaviour.
                 */
                if (regs->eflags & VM_MASK) {
                        handle_vm86_fault((struct kernel_vm86_regs *)regs,
                                          error_code);
                        return;
                }
                current->thread.trap_no = 19;
                current->thread.error_code = error_code;
                die_if_kernel("cache flush denied", regs, error_code);
                force_sig(SIGSEGV, current);
        }
}
fastcall void do_spurious_interrupt_bug(struct pt_regs * regs,
                                        long error_code)
{
#if 0
        /* No need to warn about this any longer. */
        printk("Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
#endif
}
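
/*
 * The two helpers below support returning to user space on a 16-bit stack
 * segment. An "iret" to a 16-bit SS restores only the low word of ESP, so
 * the high word of the kernel stack pointer would leak (and the resulting
 * ESP would be wrong). Instead, the iret frame is staged on a small
 * per-CPU 16-bit stack (segment __ESPFIX_SS) whose layout makes the
 * truncated ESP valid; fixup_x86_bogus_stack() copies the contents of
 * that 16-bit stack back onto the 32-bit stack and returns the new
 * stack pointer.
 */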
fastcall void setup_x86_bogus_stack(unsigned char * stk)
{
        unsigned long *switch16_ptr, *switch32_ptr;
        struct pt_regs *regs;
        unsigned long stack_top, stack_bot;
        unsigned short iret_frame16_off;
        int cpu = smp_processor_id();
        /* reserve the space on 32bit stack for the magic switch16 pointer */
        memmove(stk, stk + 8, sizeof(struct pt_regs));
        switch16_ptr = (unsigned long *)(stk + sizeof(struct pt_regs));
        regs = (struct pt_regs *)stk;
        /* now the switch32 on 16bit stack */
        stack_bot = (unsigned long)&per_cpu(cpu_16bit_stack, cpu);
        stack_top = stack_bot + CPU_16BIT_STACK_SIZE;
        switch32_ptr = (unsigned long *)(stack_top - 8);
        iret_frame16_off = CPU_16BIT_STACK_SIZE - 8 - 20;
        /* copy iret frame on 16bit stack */
        memcpy((void *)(stack_bot + iret_frame16_off), &regs->eip, 20);
        /* fill in the switch pointers */
        switch16_ptr[0] = (regs->esp & 0xffff0000) | iret_frame16_off;
        switch16_ptr[1] = __ESPFIX_SS;
        switch32_ptr[0] = (unsigned long)stk + sizeof(struct pt_regs) +
                8 - CPU_16BIT_STACK_SIZE;
        switch32_ptr[1] = __KERNEL_DS;
}

fastcall unsigned char * fixup_x86_bogus_stack(unsigned short sp)
{
        unsigned long *switch32_ptr;
        unsigned char *stack16, *stack32;
        unsigned long stack_top, stack_bot;
        int len;
        int cpu = smp_processor_id();
        stack_bot = (unsigned long)&per_cpu(cpu_16bit_stack, cpu);
        stack_top = stack_bot + CPU_16BIT_STACK_SIZE;
        switch32_ptr = (unsigned long *)(stack_top - 8);
        /* copy the data from 16bit stack to 32bit stack */
        len = CPU_16BIT_STACK_SIZE - 8 - sp;
        stack16 = (unsigned char *)(stack_bot + sp);
        stack32 = (unsigned char *)
                (switch32_ptr[0] + CPU_16BIT_STACK_SIZE - 8 - len);
        memcpy(stack32, stack16, len);
        return stack32;
}
/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 *
 * Must be called with kernel preemption disabled (in this case,
 * local interrupts are disabled at the call-site in entry.S).
 */
asmlinkage void math_state_restore(struct pt_regs regs)
{
        struct thread_info *thread = current_thread_info();
        struct task_struct *tsk = thread->task;

        clts();         /* Allow maths ops (or we recurse) */
        if (!tsk_used_math(tsk))
                init_fpu(tsk);
        restore_fpu(tsk);
        thread->status |= TS_USEDFPU;   /* So we fnsave on switch_to() */
}

#ifndef CONFIG_MATH_EMULATION

asmlinkage void math_emulate(long arg)
{
        printk("math-emulation not enabled and no coprocessor found.\n");
        printk("killing %s.\n", current->comm);
        force_sig(SIGFPE, current);
        schedule();
}

#endif /* CONFIG_MATH_EMULATION */
#ifdef CONFIG_X86_F00F_BUG
void __init trap_init_f00f_bug(void)
{
        __set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);

        /*
         * Update the IDT descriptor and reload the IDT so that
         * it uses the read-only mapped virtual address.
         */
        idt_descr.address = fix_to_virt(FIX_F00F_IDT);
        __asm__ __volatile__("lidt %0" : : "m" (idt_descr));
}
#endif
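
/*
 * _set_gate() builds an 8-byte IDT gate descriptor in place. The low
 * dword holds the code segment selector in its high word and bits 0-15
 * of the handler address in its low word; the high dword holds bits
 * 16-31 of the address plus the access bits: 0x8000 (present), the DPL
 * in bits 13-14, and the 4-bit gate type in bits 8-11.
 */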
#define _set_gate(gate_addr,type,dpl,addr,seg) \
do { \
        int __d0, __d1; \
        __asm__ __volatile__ ("movw %%dx,%%ax\n\t" \
                "movw %4,%%dx\n\t" \
                "movl %%eax,%0\n\t" \
                "movl %%edx,%1" \
                :"=m" (*((long *) (gate_addr))), \
                 "=m" (*(1+(long *) (gate_addr))), "=&a" (__d0), "=&d" (__d1) \
                :"i" ((short) (0x8000+(dpl<<13)+(type<<8))), \
                 "3" ((char *) (addr)),"2" ((seg) << 16)); \
} while (0)

/*
 * This needs to use 'idt_table' rather than 'idt', and
 * thus use the _nonmapped_ version of the IDT, as the
 * Pentium F0 0F bugfix can have resulted in the mapped
 * IDT being write-protected.
 */
void set_intr_gate(unsigned int n, void *addr)
{
        _set_gate(idt_table+n, 14, 0, addr, __KERNEL_CS);
}

/*
 * This routine sets up an interrupt gate at descriptor privilege level 3.
 */
static inline void set_system_intr_gate(unsigned int n, void *addr)
{
        _set_gate(idt_table+n, 14, 3, addr, __KERNEL_CS);
}

static void __init set_trap_gate(unsigned int n, void *addr)
{
        _set_gate(idt_table+n, 15, 0, addr, __KERNEL_CS);
}

static void __init set_system_gate(unsigned int n, void *addr)
{
        _set_gate(idt_table+n, 15, 3, addr, __KERNEL_CS);
}

static void __init set_task_gate(unsigned int n, unsigned int gdt_entry)
{
        _set_gate(idt_table+n, 5, 0, 0, (gdt_entry<<3));
}
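
/*
 * The gate choice in trap_init() matters: interrupt gates (type 14) clear
 * IF on entry, so the handler starts with interrupts off, while trap gates
 * (type 15) leave IF alone. The "system" variants use DPL 3, allowing user
 * space to reach the gate with an explicit int instruction (int3, into,
 * bound, and the system call vector). The double fault gets a task gate
 * so it can run on a known-good TSS even if the kernel stack is gone.
 */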
void __init trap_init(void)
{
#ifdef CONFIG_EISA
        void __iomem *p = ioremap(0x0FFFD9, 4);
        if (readl(p) == 'E'+('I'<<8)+('S'<<16)+('A'<<24)) {
                EISA_bus = 1;
        }
        iounmap(p);
#endif

#ifdef CONFIG_X86_LOCAL_APIC
        init_apic_mappings();
#endif

        set_trap_gate(0, &divide_error);
        set_intr_gate(1, &debug);
        set_intr_gate(2, &nmi);
        set_system_intr_gate(3, &int3); /* int3-5 can be called from all */
        set_system_gate(4, &overflow);
        set_system_gate(5, &bounds);
        set_trap_gate(6, &invalid_op);
        set_trap_gate(7, &device_not_available);
        set_task_gate(8, GDT_ENTRY_DOUBLEFAULT_TSS);
        set_trap_gate(9, &coprocessor_segment_overrun);
        set_trap_gate(10, &invalid_TSS);
        set_trap_gate(11, &segment_not_present);
        set_trap_gate(12, &stack_segment);
        set_trap_gate(13, &general_protection);
        set_intr_gate(14, &page_fault);
        set_trap_gate(15, &spurious_interrupt_bug);
        set_trap_gate(16, &coprocessor_error);
        set_trap_gate(17, &alignment_check);
#ifdef CONFIG_X86_MCE
        set_trap_gate(18, &machine_check);
#endif
        set_trap_gate(19, &simd_coprocessor_error);

        set_system_gate(SYSCALL_VECTOR, &system_call);

        /*
         * Should be a barrier for any external CPU state.
         */
        cpu_init();

        trap_init_hook();
}
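
/*
 * "kstack=N" on the kernel command line sets how many words of the
 * kernel stack show_stack() dumps in an oops report (default 24).
 */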
static int __init kstack_setup(char *s)
{
        kstack_depth_to_print = simple_strtoul(s, NULL, 0);
        return 0;
}
__setup("kstack=", kstack_setup);