/*
 *  linux/arch/i386/traps.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/utsname.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>

#ifdef CONFIG_EISA
#include <linux/ioport.h>
#include <linux/eisa.h>
#endif

#ifdef CONFIG_MCA
#include <linux/mca.h>
#endif

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/debugreg.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/nmi.h>
#include <asm/smp.h>
#include <asm/arch_hooks.h>
#include <asm/kdebug.h>

#include <linux/module.h>

#include "mach_traps.h"

asmlinkage int system_call(void);

struct desc_struct default_ldt[] = { { 0, 0 }, { 0, 0 }, { 0, 0 },
		{ 0, 0 }, { 0, 0 } };

/* Do we ignore FPU interrupts? */
char ignore_fpu_irq = 0;

/*
 * The IDT has to be page-aligned to simplify the Pentium
 * F0 0F bug workaround.. We have a special link segment
 * for this.
 */
struct desc_struct idt_table[256] __attribute__((__section__(".data.idt"))) = { {0, 0}, };

asmlinkage void divide_error(void);
asmlinkage void debug(void);
asmlinkage void nmi(void);
asmlinkage void int3(void);
asmlinkage void overflow(void);
asmlinkage void bounds(void);
asmlinkage void invalid_op(void);
asmlinkage void device_not_available(void);
asmlinkage void coprocessor_segment_overrun(void);
asmlinkage void invalid_TSS(void);
asmlinkage void segment_not_present(void);
asmlinkage void stack_segment(void);
asmlinkage void general_protection(void);
asmlinkage void page_fault(void);
asmlinkage void coprocessor_error(void);
asmlinkage void simd_coprocessor_error(void);
asmlinkage void alignment_check(void);
asmlinkage void spurious_interrupt_bug(void);
asmlinkage void machine_check(void);

static int kstack_depth_to_print = 24;
struct notifier_block *i386die_chain;
static DEFINE_SPINLOCK(die_notifier_lock);

int register_die_notifier(struct notifier_block *nb)
{
	int err = 0;
	unsigned long flags;

	spin_lock_irqsave(&die_notifier_lock, flags);
	err = notifier_chain_register(&i386die_chain, nb);
	spin_unlock_irqrestore(&die_notifier_lock, flags);
	return err;
}
EXPORT_SYMBOL(register_die_notifier);
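
/*
 * A minimal usage sketch (hypothetical module code, not part of this
 * file): a callback hangs off i386die_chain and is invoked through
 * notify_die().  The die_args layout comes from <asm/kdebug.h>.
 *
 *	static int my_die_handler(struct notifier_block *nb,
 *				  unsigned long val, void *data)
 *	{
 *		struct die_args *args = data;
 *
 *		if (val == DIE_OOPS)
 *			printk(KERN_ERR "oops '%s' at eip %08lx\n",
 *			       args->str, args->regs->eip);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_die_handler,
 *	};
 *
 *	register_die_notifier(&my_nb);	// e.g. from module_init()
 */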

static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
{
	return	p > (void *)tinfo &&
		p < (void *)tinfo + THREAD_SIZE - 3;
}

static void print_addr_and_symbol(unsigned long addr, char *log_lvl)
{
	printk(log_lvl);
	printk(" [<%08lx>] ", addr);
	print_symbol("%s", addr);
	printk("\n");
}
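
/*
 * For the CONFIG_FRAME_POINTER walk below, recall the i386 frame layout
 * set up by the usual "pushl %ebp; movl %esp, %ebp" prologue:
 *
 *	[ebp + 4]  return address of the current frame
 *	[ebp + 0]  saved ebp, i.e. the caller's frame pointer
 *
 * so following *ebp hops caller-to-caller until the chain leaves the
 * task's stack and valid_stack_ptr() stops the loop.
 */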

static inline unsigned long print_context_stack(struct thread_info *tinfo,
				unsigned long *stack, unsigned long ebp,
				char *log_lvl)
{
	unsigned long addr;

#ifdef	CONFIG_FRAME_POINTER
	while (valid_stack_ptr(tinfo, (void *)ebp)) {
		addr = *(unsigned long *)(ebp + 4);
		print_addr_and_symbol(addr, log_lvl);
		ebp = *(unsigned long *)ebp;
	}
#else
	while (valid_stack_ptr(tinfo, stack)) {
		addr = *stack++;
		if (__kernel_text_address(addr))
			print_addr_and_symbol(addr, log_lvl);
	}
#endif
	return ebp;
}

static void show_trace_log_lvl(struct task_struct *task,
			       unsigned long *stack, char *log_lvl)
{
	unsigned long ebp;

	if (!task)
		task = current;

	if (task == current) {
		/* Grab ebp right from our regs */
		asm ("movl %%ebp, %0" : "=r" (ebp) : );
	} else {
		/* ebp is the last reg pushed by switch_to */
		ebp = *(unsigned long *) task->thread.esp;
	}

	while (1) {
		struct thread_info *context;
		context = (struct thread_info *)
			((unsigned long)stack & (~(THREAD_SIZE - 1)));
		ebp = print_context_stack(context, stack, ebp, log_lvl);
		stack = (unsigned long*)context->previous_esp;
		if (!stack)
			break;
		printk(log_lvl);
		printk(" =======================\n");
	}
}

void show_trace(struct task_struct *task, unsigned long * stack)
{
	show_trace_log_lvl(task, stack, "");
}

static void show_stack_log_lvl(struct task_struct *task, unsigned long *esp,
			       char *log_lvl)
{
	unsigned long *stack;
	int i;

	if (esp == NULL) {
		if (task)
			esp = (unsigned long*)task->thread.esp;
		else
			esp = (unsigned long *)&esp;
	}

	stack = esp;
	printk(log_lvl);
	for(i = 0; i < kstack_depth_to_print; i++) {
		if (kstack_end(stack))
			break;
		if (i && ((i % 8) == 0)) {
			printk("\n");
			printk(log_lvl);
			printk("       ");
		}
		printk("%08lx ", *stack++);
	}
	printk("\n");
	printk(log_lvl);
	printk("Call Trace:\n");
	show_trace_log_lvl(task, esp, log_lvl);
}

void show_stack(struct task_struct *task, unsigned long *esp)
{
	show_stack_log_lvl(task, esp, "");
}

/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
	unsigned long stack;

	show_trace(current, &stack);
}
EXPORT_SYMBOL(dump_stack);

void show_registers(struct pt_regs *regs)
{
	int i;
	int in_kernel = 1;
	unsigned long esp;
	unsigned short ss;

	esp = (unsigned long) (&regs->esp);
	savesegment(ss, ss);
	if (user_mode(regs)) {
		in_kernel = 0;
		esp = regs->esp;
		ss = regs->xss & 0xffff;
	}
	print_modules();
	printk(KERN_EMERG "CPU:    %d\nEIP:    %04x:[<%08lx>]    %s VLI\n"
			"EFLAGS: %08lx   (%s %.*s) \n",
		smp_processor_id(), 0xffff & regs->xcs, regs->eip,
		print_tainted(), regs->eflags, system_utsname.release,
		(int)strcspn(system_utsname.version, " "),
		system_utsname.version);
	print_symbol(KERN_EMERG "EIP is at %s\n", regs->eip);
	printk(KERN_EMERG "eax: %08lx   ebx: %08lx   ecx: %08lx   edx: %08lx\n",
		regs->eax, regs->ebx, regs->ecx, regs->edx);
	printk(KERN_EMERG "esi: %08lx   edi: %08lx   ebp: %08lx   esp: %08lx\n",
		regs->esi, regs->edi, regs->ebp, esp);
	printk(KERN_EMERG "ds: %04x   es: %04x   ss: %04x\n",
		regs->xds & 0xffff, regs->xes & 0xffff, ss);
	printk(KERN_EMERG "Process %s (pid: %d, threadinfo=%p task=%p)",
		current->comm, current->pid, current_thread_info(), current);
	/*
	 * When in-kernel, we also print out the stack and code at the
	 * time of the fault..
	 */
	if (in_kernel) {
		u8 __user *eip;

		printk("\n" KERN_EMERG "Stack: ");
		show_stack_log_lvl(NULL, (unsigned long *)esp, KERN_EMERG);

		printk(KERN_EMERG "Code: ");

		eip = (u8 __user *)regs->eip - 43;
		for (i = 0; i < 64; i++, eip++) {
			unsigned char c;

			if (eip < (u8 __user *)PAGE_OFFSET || __get_user(c, eip)) {
				printk(" Bad EIP value.");
				break;
			}
			if (eip == (u8 __user *)regs->eip)
				printk("<%02x> ", c);
			else
				printk("%02x ", c);
		}
	}
	printk("\n");
}
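
/*
 * handle_BUG() below decodes the sequence that BUG() emits.  As a
 * sketch (assuming CONFIG_DEBUG_BUGVERBOSE; the exact asm lives in
 * include/asm-i386/bug.h), BUG() expands to roughly:
 *
 *	ud2			; two bytes: 0x0f 0x0b
 *	.word	__LINE__	; 16-bit line number at eip + 2
 *	.long	__FILE__	; 32-bit file-name pointer at eip + 4
 *
 * which is why the code reads a short at eip, compares it against
 * 0x0b0f (the ud2 opcode read little-endian), then pulls the line
 * number and file pointer from the bytes that follow.
 */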

static void handle_BUG(struct pt_regs *regs)
{
	unsigned short ud2;
	unsigned short line;
	char *file;
	char c;
	unsigned long eip;

	eip = regs->eip;

	if (eip < PAGE_OFFSET)
		goto no_bug;
	if (__get_user(ud2, (unsigned short __user *)eip))
		goto no_bug;
	if (ud2 != 0x0b0f)
		goto no_bug;
	if (__get_user(line, (unsigned short __user *)(eip + 2)))
		goto bug;
	if (__get_user(file, (char * __user *)(eip + 4)) ||
		(unsigned long)file < PAGE_OFFSET || __get_user(c, file))
		file = "<bad filename>";

	printk(KERN_EMERG "------------[ cut here ]------------\n");
	printk(KERN_EMERG "kernel BUG at %s:%d!\n", file, line);

no_bug:
	return;

	/* Here we know it was a BUG but file-n-line is unavailable */
bug:
	printk(KERN_EMERG "Kernel BUG\n");
}

/* This is gone through when something in the kernel
 * has done something bad and is about to be terminated.
 */
void die(const char * str, struct pt_regs * regs, long err)
{
	static struct {
		spinlock_t lock;
		u32 lock_owner;
		int lock_owner_depth;
	} die = {
		.lock =			SPIN_LOCK_UNLOCKED,
		.lock_owner =		-1,
		.lock_owner_depth =	0
	};
	static int die_counter;
	unsigned long flags;

	if (die.lock_owner != raw_smp_processor_id()) {
		console_verbose();
		spin_lock_irqsave(&die.lock, flags);
		die.lock_owner = smp_processor_id();
		die.lock_owner_depth = 0;
		bust_spinlocks(1);
	}
	else
		local_save_flags(flags);

	if (++die.lock_owner_depth < 3) {
		int nl = 0;
		handle_BUG(regs);
		printk(KERN_EMERG "%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
		printk(KERN_EMERG "PREEMPT ");
		nl = 1;
#endif
#ifdef CONFIG_SMP
		if (!nl)
			printk(KERN_EMERG);
		printk("SMP ");
		nl = 1;
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
		if (!nl)
			printk(KERN_EMERG);
		printk("DEBUG_PAGEALLOC");
		nl = 1;
#endif
		if (nl)
			printk("\n");
		notify_die(DIE_OOPS, (char *)str, regs, err, 255, SIGSEGV);
		show_registers(regs);
	} else
		printk(KERN_EMERG "Recursive die() failure, output suppressed\n");

	bust_spinlocks(0);
	die.lock_owner = -1;
	spin_unlock_irqrestore(&die.lock, flags);

	if (kexec_should_crash(current))
		crash_kexec(regs);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops) {
		printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
		ssleep(5);
		panic("Fatal exception");
	}
	do_exit(SIGSEGV);
}

static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
{
	if (!user_mode_vm(regs))
		die(str, regs, err);
}

static void __kprobes do_trap(int trapnr, int signr, char *str, int vm86,
			      struct pt_regs * regs, long error_code,
			      siginfo_t *info)
{
	struct task_struct *tsk = current;
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = trapnr;

	if (regs->eflags & VM_MASK) {
		if (vm86)
			goto vm86_trap;
		goto trap_signal;
	}

	if (!user_mode(regs))
		goto kernel_trap;

	trap_signal: {
		if (info)
			force_sig_info(signr, info, tsk);
		else
			force_sig(signr, tsk);
		return;
	}

	kernel_trap: {
		if (!fixup_exception(regs))
			die(str, regs, error_code);
		return;
	}

	vm86_trap: {
		int ret = handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, trapnr);
		if (ret) goto trap_signal;
		return;
	}
}

#define DO_ERROR(trapnr, signr, str, name) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
						== NOTIFY_STOP) \
		return; \
	do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
	siginfo_t info; \
	info.si_signo = signr; \
	info.si_errno = 0; \
	info.si_code = sicode; \
	info.si_addr = (void __user *)siaddr; \
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
						== NOTIFY_STOP) \
		return; \
	do_trap(trapnr, signr, str, 0, regs, error_code, &info); \
}

#define DO_VM86_ERROR(trapnr, signr, str, name) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
						== NOTIFY_STOP) \
		return; \
	do_trap(trapnr, signr, str, 1, regs, error_code, NULL); \
}

#define DO_VM86_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
	siginfo_t info; \
	info.si_signo = signr; \
	info.si_errno = 0; \
	info.si_code = sicode; \
	info.si_addr = (void __user *)siaddr; \
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
						== NOTIFY_STOP) \
		return; \
	do_trap(trapnr, signr, str, 1, regs, error_code, &info); \
}
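
/*
 * To make the macro soup concrete: DO_ERROR(10, SIGSEGV, "invalid TSS",
 * invalid_TSS) below expands to (whitespace aside):
 *
 *	fastcall void do_invalid_TSS(struct pt_regs * regs, long error_code)
 *	{
 *		if (notify_die(DIE_TRAP, "invalid TSS", regs, error_code,
 *			       10, SIGSEGV) == NOTIFY_STOP)
 *			return;
 *		do_trap(10, SIGSEGV, "invalid TSS", 0, regs, error_code, NULL);
 *	}
 *
 * i.e. each line below defines one complete do_<name>() handler that the
 * corresponding entry.S stub calls with the hardware error code.
 */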

DO_VM86_ERROR_INFO( 0, SIGFPE,  "divide error", divide_error, FPE_INTDIV, regs->eip)
#ifndef CONFIG_KPROBES
DO_VM86_ERROR( 3, SIGTRAP, "int3", int3)
#endif
DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)
DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO( 6, SIGILL,  "invalid opcode", invalid_op, ILL_ILLOPN, regs->eip)
DO_ERROR( 9, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS,  "segment not present", segment_not_present)
DO_ERROR(12, SIGBUS,  "stack segment", stack_segment)
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0)

fastcall void __kprobes do_general_protection(struct pt_regs * regs,
					      long error_code)
{
	int cpu = get_cpu();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);
	struct thread_struct *thread = &current->thread;

	/*
	 * Perform the lazy TSS's I/O bitmap copy. If the TSS has an
	 * invalid offset set (the LAZY one) and the faulting thread has
	 * a valid I/O bitmap pointer, we copy the I/O bitmap in the TSS
	 * and set the offset field correctly. Then we let the CPU
	 * restart the faulting instruction.
	 */
	if (tss->io_bitmap_base == INVALID_IO_BITMAP_OFFSET_LAZY &&
	    thread->io_bitmap_ptr) {
		memcpy(tss->io_bitmap, thread->io_bitmap_ptr,
		       thread->io_bitmap_max);
		/*
		 * If the previous map extended to higher ports than the
		 * current one, pad the extra space with 0xff (no access).
		 */
		if (thread->io_bitmap_max < tss->io_bitmap_max)
			memset((char *) tss->io_bitmap +
				thread->io_bitmap_max, 0xff,
				tss->io_bitmap_max - thread->io_bitmap_max);
		tss->io_bitmap_max = thread->io_bitmap_max;
		tss->io_bitmap_base = IO_BITMAP_OFFSET;
		tss->io_bitmap_owner = thread;
		put_cpu();
		return;
	}
	put_cpu();

	current->thread.error_code = error_code;
	current->thread.trap_no = 13;
	if (regs->eflags & VM_MASK)
		goto gp_in_vm86;

	if (!user_mode(regs))
		goto gp_in_kernel;

	current->thread.error_code = error_code;
	current->thread.trap_no = 13;
	force_sig(SIGSEGV, current);
	return;

gp_in_vm86:
	local_irq_enable();
	handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
	return;

gp_in_kernel:
	if (!fixup_exception(regs)) {
		if (notify_die(DIE_GPF, "general protection fault", regs,
				error_code, 13, SIGSEGV) == NOTIFY_STOP)
			return;
		die("general protection fault", regs, error_code);
	}
}

static void mem_parity_error(unsigned char reason, struct pt_regs * regs)
{
	printk(KERN_EMERG "Uhhuh. NMI received. Dazed and confused, but trying "
			"to continue\n");
	printk(KERN_EMERG "You probably have a hardware problem with your RAM "
			"chips\n");

	/* Clear and disable the memory parity error line. */
	clear_mem_error(reason);
}

static void io_check_error(unsigned char reason, struct pt_regs * regs)
{
	unsigned long i;

	printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
	show_registers(regs);

	/* Re-enable the IOCK line, wait for a few seconds */
	reason = (reason & 0xf) | 8;
	outb(reason, 0x61);
	i = 2000;
	while (--i) udelay(1000);
	reason &= ~8;
	outb(reason, 0x61);
}

static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
{
#ifdef CONFIG_MCA
	/* Might actually be able to figure out what the guilty party
	 * is. */
	if( MCA_bus ) {
		mca_handle_nmi();
		return;
	}
#endif
	printk("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
		reason, smp_processor_id());
	printk("Dazed and confused, but trying to continue\n");
	printk("Do you have a strange power saving mode enabled?\n");
}

static DEFINE_SPINLOCK(nmi_print_lock);

void die_nmi (struct pt_regs *regs, const char *msg)
{
	if (notify_die(DIE_NMIWATCHDOG, msg, regs, 0, 0, SIGINT) ==
	    NOTIFY_STOP)
		return;

	spin_lock(&nmi_print_lock);
	/*
	 * We are in trouble anyway, let's at least try
	 * to get a message out.
	 */
	bust_spinlocks(1);
	printk(KERN_EMERG "%s", msg);
	printk(" on CPU%d, eip %08lx, registers:\n",
		smp_processor_id(), regs->eip);
	show_registers(regs);
	printk(KERN_EMERG "console shuts up ...\n");
	console_silent();
	spin_unlock(&nmi_print_lock);
	bust_spinlocks(0);

	/* If we are in kernel we are probably nested up pretty badly
	 * and might as well get out now while we still can.
	 */
	if (!user_mode(regs)) {
		current->thread.trap_no = 2;
		crash_kexec(regs);
	}

	do_exit(SIGSEGV);
}
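
/*
 * Decoding note (standard PC/AT behaviour, not specific to this file):
 * get_nmi_reason() reads system control port B (I/O port 0x61), where
 * bit 7 (0x80) latches a memory parity error and bit 6 (0x40) latches
 * an I/O channel check.  That is the 0xc0 test and the two handlers
 * dispatched from default_do_nmi() below.
 */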

static void default_do_nmi(struct pt_regs * regs)
{
	unsigned char reason = 0;

	/* Only the BSP gets external NMIs from the system. */
	if (!smp_processor_id())
		reason = get_nmi_reason();

	if (!(reason & 0xc0)) {
		if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 0, SIGINT)
							== NOTIFY_STOP)
			return;
#ifdef CONFIG_X86_LOCAL_APIC
		/*
		 * Ok, so this is none of the documented NMI sources,
		 * so it must be the NMI watchdog.
		 */
		if (nmi_watchdog) {
			nmi_watchdog_tick(regs);
			return;
		}
#endif
		unknown_nmi_error(reason, regs);
		return;
	}
	if (notify_die(DIE_NMI, "nmi", regs, reason, 0, SIGINT) == NOTIFY_STOP)
		return;
	if (reason & 0x80)
		mem_parity_error(reason, regs);
	if (reason & 0x40)
		io_check_error(reason, regs);
	/*
	 * Reassert NMI in case it became active meanwhile
	 * as it's edge-triggered.
	 */
	reassert_nmi();
}

static int dummy_nmi_callback(struct pt_regs * regs, int cpu)
{
	return 0;
}

static nmi_callback_t nmi_callback = dummy_nmi_callback;

fastcall void do_nmi(struct pt_regs * regs, long error_code)
{
	int cpu;

	nmi_enter();

	cpu = smp_processor_id();

	++nmi_count(cpu);

	if (!rcu_dereference(nmi_callback)(regs, cpu))
		default_do_nmi(regs);

	nmi_exit();
}

void set_nmi_callback(nmi_callback_t callback)
{
	rcu_assign_pointer(nmi_callback, callback);
}
EXPORT_SYMBOL_GPL(set_nmi_callback);

void unset_nmi_callback(void)
{
	nmi_callback = dummy_nmi_callback;
}
EXPORT_SYMBOL_GPL(unset_nmi_callback);
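
/*
 * Usage sketch (hypothetical caller, e.g. an oprofile-style profiler;
 * the callback type nmi_callback_t is declared in <asm/nmi.h>):
 *
 *	static int my_nmi_handler(struct pt_regs *regs, int cpu)
 *	{
 *		// sample regs->eip here ...
 *		return 1;	// 1 = handled, skip default_do_nmi()
 *	}
 *
 *	set_nmi_callback(my_nmi_handler);
 *	...
 *	unset_nmi_callback();
 *
 * Returning 0 from the callback falls through to default_do_nmi()
 * above, exactly as dummy_nmi_callback() does.
 */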

#ifdef CONFIG_KPROBES
fastcall void __kprobes do_int3(struct pt_regs *regs, long error_code)
{
	if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
			== NOTIFY_STOP)
		return;
	/* This is an interrupt gate, because kprobes wants interrupts
	   disabled.  Normal trap handlers don't. */
	restore_interrupts(regs);
	do_trap(3, SIGTRAP, "int3", 1, regs, error_code, NULL);
}
#endif

/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 */
fastcall void __kprobes do_debug(struct pt_regs * regs, long error_code)
{
	unsigned int condition;
	struct task_struct *tsk = current;

	get_debugreg(condition, 6);

	if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
					SIGTRAP) == NOTIFY_STOP)
		return;
	/* It's safe to allow irq's after DR6 has been saved */
	if (regs->eflags & X86_EFLAGS_IF)
		local_irq_enable();

	/* Mask out spurious debug traps due to lazy DR7 setting */
	if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
		if (!tsk->thread.debugreg[7])
			goto clear_dr7;
	}

	if (regs->eflags & VM_MASK)
		goto debug_vm86;

	/* Save debug status register where ptrace can see it */
	tsk->thread.debugreg[6] = condition;

	/*
	 * Single-stepping through TF: make sure we ignore any events in
	 * kernel space (but re-enable TF when returning to user mode).
	 */
	if (condition & DR_STEP) {
		/*
		 * We already checked v86 mode above, so we can
		 * check for kernel mode by just checking the CPL
		 * of CS.
		 */
		if (!user_mode(regs))
			goto clear_TF_reenable;
	}

	/* Ok, finally something we can handle */
	send_sigtrap(tsk, regs, error_code);

	/* Disable additional traps. They'll be re-enabled when
	 * the signal is delivered.
	 */
clear_dr7:
	set_debugreg(0, 7);
	return;

debug_vm86:
	handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
	return;

clear_TF_reenable:
	set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
	regs->eflags &= ~TF_MASK;
	return;
}

/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 signalling.
 */
void math_error(void __user *eip)
{
	struct task_struct * task;
	siginfo_t info;
	unsigned short cwd, swd;

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	task = current;
	save_init_fpu(task);
	task->thread.trap_no = 16;
	task->thread.error_code = 0;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = __SI_FAULT;
	info.si_addr = eip;

	/*
	 * (~cwd & swd) will mask out exceptions that are not set to unmasked
	 * status.  0x3f is the exception bits in these regs, 0x200 is the
	 * C1 flag you need in case of a stack fault, 0x040 is the stack
	 * fault bit.  We should only be taking one exception at a time,
	 * so if this combination doesn't produce any single exception,
	 * then we have a bad program that isn't synchronizing its FPU usage
	 * and it will suffer the consequences since we won't be able to
	 * fully reproduce the context of the exception.
	 */
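	/*
	 * Worked example (values are illustrative): with the default
	 * control word 0x037f every exception is masked (the low six
	 * mask bits are all 1), so swd & ~cwd & 0x3f == 0 and we return
	 * without raising SIGFPE.  If a program unmasks divide-by-zero
	 * (clears CW bit 2) and then divides by zero, the status word
	 * has ZE (0x004) set, ~cwd selects exactly that bit, and the
	 * switch below maps 0x004 to FPE_FLTDIV.
	 */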
	cwd = get_fpu_cwd(task);
	swd = get_fpu_swd(task);
	switch (swd & ~cwd & 0x3f) {
		case 0x000: /* No unmasked exception */
			return;
		default:    /* Multiple exceptions */
			break;
		case 0x001: /* Invalid Op */
			/*
			 * swd & 0x240 == 0x040: Stack Underflow
			 * swd & 0x240 == 0x240: Stack Overflow
			 * User must clear the SF bit (0x40) if set
			 */
			info.si_code = FPE_FLTINV;
			break;
		case 0x002: /* Denormalize */
		case 0x010: /* Underflow */
			info.si_code = FPE_FLTUND;
			break;
		case 0x004: /* Zero Divide */
			info.si_code = FPE_FLTDIV;
			break;
		case 0x008: /* Overflow */
			info.si_code = FPE_FLTOVF;
			break;
		case 0x020: /* Precision */
			info.si_code = FPE_FLTRES;
			break;
	}
	force_sig_info(SIGFPE, &info, task);
}

fastcall void do_coprocessor_error(struct pt_regs * regs, long error_code)
{
	ignore_fpu_irq = 1;
	math_error((void __user *)regs->eip);
}

static void simd_math_error(void __user *eip)
{
	struct task_struct * task;
	siginfo_t info;
	unsigned short mxcsr;

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	task = current;
	save_init_fpu(task);
	task->thread.trap_no = 19;
	task->thread.error_code = 0;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = __SI_FAULT;
	info.si_addr = eip;
	/*
	 * The SIMD FPU exceptions are handled a little differently, as there
	 * is only a single status/control register.  Thus, to determine which
	 * unmasked exception was caught we must mask the exception mask bits
	 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
	 */
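	/*
	 * Worked example (illustrative): MXCSR keeps the exception flags
	 * in bits 0-5 and the corresponding mask bits in bits 7-12, so
	 * shifting the mask field right by 7 lines it up with the flags.
	 * With the power-on default MXCSR of 0x1f80 everything is masked
	 * and the expression yields 0.  If divide-by-zero is unmasked
	 * (ZM, bit 9, cleared) and ZE (0x004) is set, the expression
	 * yields 0x004 and the switch below picks FPE_FLTDIV.
	 */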
	mxcsr = get_fpu_mxcsr(task);
	switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
		case 0x000:
		default:
			break;
		case 0x001: /* Invalid Op */
			info.si_code = FPE_FLTINV;
			break;
		case 0x002: /* Denormalize */
		case 0x010: /* Underflow */
			info.si_code = FPE_FLTUND;
			break;
		case 0x004: /* Zero Divide */
			info.si_code = FPE_FLTDIV;
			break;
		case 0x008: /* Overflow */
			info.si_code = FPE_FLTOVF;
			break;
		case 0x020: /* Precision */
			info.si_code = FPE_FLTRES;
			break;
	}
	force_sig_info(SIGFPE, &info, task);
}

fastcall void do_simd_coprocessor_error(struct pt_regs * regs,
					long error_code)
{
	if (cpu_has_xmm) {
		/* Handle SIMD FPU exceptions on PIII+ processors. */
		ignore_fpu_irq = 1;
		simd_math_error((void __user *)regs->eip);
	} else {
		/*
		 * Handle strange cache flush from user space exception
		 * in all other cases.  This is undocumented behaviour.
		 */
		if (regs->eflags & VM_MASK) {
			handle_vm86_fault((struct kernel_vm86_regs *)regs,
					  error_code);
			return;
		}
		current->thread.trap_no = 19;
		current->thread.error_code = error_code;
		die_if_kernel("cache flush denied", regs, error_code);
		force_sig(SIGSEGV, current);
	}
}

fastcall void do_spurious_interrupt_bug(struct pt_regs * regs,
					long error_code)
{
#if 0
	/* No need to warn about this any longer. */
	printk("Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
#endif
}
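
/*
 * Layout note for the two espfix helpers below (a reading aid, not new
 * mechanism): the "iret frame" they shuffle is the five longs that the
 * CPU pushes for a privilege-changing interrupt, which is where the
 * magic constant 20 (= 5 * sizeof(long)) comes from:
 *
 *	regs->eip, regs->xcs, regs->eflags, regs->esp, regs->xss
 *
 * setup_x86_bogus_stack() copies that frame onto the per-CPU 16-bit
 * stack so that iret can return through a stack segment with a 16-bit
 * limit; fixup_x86_bogus_stack() copies the data back on the next
 * kernel entry.
 */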

fastcall void setup_x86_bogus_stack(unsigned char * stk)
{
	unsigned long *switch16_ptr, *switch32_ptr;
	struct pt_regs *regs;
	unsigned long stack_top, stack_bot;
	unsigned short iret_frame16_off;
	int cpu = smp_processor_id();
	/* reserve the space on 32bit stack for the magic switch16 pointer */
	memmove(stk, stk + 8, sizeof(struct pt_regs));
	switch16_ptr = (unsigned long *)(stk + sizeof(struct pt_regs));
	regs = (struct pt_regs *)stk;
	/* now the switch32 on 16bit stack */
	stack_bot = (unsigned long)&per_cpu(cpu_16bit_stack, cpu);
	stack_top = stack_bot + CPU_16BIT_STACK_SIZE;
	switch32_ptr = (unsigned long *)(stack_top - 8);
	iret_frame16_off = CPU_16BIT_STACK_SIZE - 8 - 20;
	/* copy iret frame on 16bit stack */
	memcpy((void *)(stack_bot + iret_frame16_off), &regs->eip, 20);
	/* fill in the switch pointers */
	switch16_ptr[0] = (regs->esp & 0xffff0000) | iret_frame16_off;
	switch16_ptr[1] = __ESPFIX_SS;
	switch32_ptr[0] = (unsigned long)stk + sizeof(struct pt_regs) +
		8 - CPU_16BIT_STACK_SIZE;
	switch32_ptr[1] = __KERNEL_DS;
}

fastcall unsigned char * fixup_x86_bogus_stack(unsigned short sp)
{
	unsigned long *switch32_ptr;
	unsigned char *stack16, *stack32;
	unsigned long stack_top, stack_bot;
	int len;
	int cpu = smp_processor_id();
	stack_bot = (unsigned long)&per_cpu(cpu_16bit_stack, cpu);
	stack_top = stack_bot + CPU_16BIT_STACK_SIZE;
	switch32_ptr = (unsigned long *)(stack_top - 8);
	/* copy the data from 16bit stack to 32bit stack */
	len = CPU_16BIT_STACK_SIZE - 8 - sp;
	stack16 = (unsigned char *)(stack_bot + sp);
	stack32 = (unsigned char *)
		(switch32_ptr[0] + CPU_16BIT_STACK_SIZE - 8 - len);
	memcpy(stack32, stack16, len);
	return stack32;
}
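
/*
 * Context for math_state_restore() below (the standard i386 lazy-FPU
 * flow): __switch_to() leaves CR0.TS set for a task whose FPU state is
 * not loaded.  The first FPU instruction then raises #NM (vector 7,
 * device_not_available), entry.S calls in here, and clts() clears TS
 * so the retried instruction can complete with the freshly restored
 * state.
 */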

/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 *
 * Must be called with kernel preemption disabled (in this case,
 * local interrupts are disabled at the call-site in entry.S).
 */
asmlinkage void math_state_restore(struct pt_regs regs)
{
	struct thread_info *thread = current_thread_info();
	struct task_struct *tsk = thread->task;

	clts();		/* Allow maths ops (or we recurse) */
	if (!tsk_used_math(tsk))
		init_fpu(tsk);
	restore_fpu(tsk);
	thread->status |= TS_USEDFPU;	/* So we fnsave on switch_to() */
}

#ifndef CONFIG_MATH_EMULATION

asmlinkage void math_emulate(long arg)
{
	printk(KERN_EMERG "math-emulation not enabled and no coprocessor found.\n");
	printk(KERN_EMERG "killing %s.\n",current->comm);
	force_sig(SIGFPE,current);
	schedule();
}

#endif /* CONFIG_MATH_EMULATION */

#ifdef CONFIG_X86_F00F_BUG
void __init trap_init_f00f_bug(void)
{
	__set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);

	/*
	 * Update the IDT descriptor and reload the IDT so that
	 * it uses the read-only mapped virtual address.
	 */
	idt_descr.address = fix_to_virt(FIX_F00F_IDT);
	load_idt(&idt_descr);
}
#endif

#define _set_gate(gate_addr,type,dpl,addr,seg) \
do { \
	int __d0, __d1; \
	__asm__ __volatile__ ("movw %%dx,%%ax\n\t" \
		"movw %4,%%dx\n\t" \
		"movl %%eax,%0\n\t" \
		"movl %%edx,%1" \
		:"=m" (*((long *) (gate_addr))), \
		 "=m" (*(1+(long *) (gate_addr))), "=&a" (__d0), "=&d" (__d1) \
		:"i" ((short) (0x8000+(dpl<<13)+(type<<8))), \
		 "3" ((char *) (addr)),"2" ((seg) << 16)); \
} while (0)
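
/*
 * What _set_gate() actually builds (standard IA-32 gate descriptor
 * layout, for reference): the two 32-bit words written to gate_addr are
 *
 *	word 0:  segment selector in bits 31..16, handler offset 15..0
 *	word 1:  handler offset 31..16, then P | DPL | 0 | type flags
 *
 * and 0x8000 + (dpl<<13) + (type<<8) assembles the present bit, the
 * descriptor privilege level and the gate type (14 = 32-bit interrupt
 * gate, 15 = 32-bit trap gate, 5 = task gate) into that second word.
 */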

/*
 * This needs to use 'idt_table' rather than 'idt', and
 * thus use the _nonmapped_ version of the IDT, as the
 * Pentium F0 0F bugfix can have resulted in the mapped
 * IDT being write-protected.
 */
void set_intr_gate(unsigned int n, void *addr)
{
	_set_gate(idt_table+n,14,0,addr,__KERNEL_CS);
}

/*
 * This routine sets up an interrupt gate at descriptor privilege level 3.
 */
static inline void set_system_intr_gate(unsigned int n, void *addr)
{
	_set_gate(idt_table+n, 14, 3, addr, __KERNEL_CS);
}

static void __init set_trap_gate(unsigned int n, void *addr)
{
	_set_gate(idt_table+n,15,0,addr,__KERNEL_CS);
}

static void __init set_system_gate(unsigned int n, void *addr)
{
	_set_gate(idt_table+n,15,3,addr,__KERNEL_CS);
}

static void __init set_task_gate(unsigned int n, unsigned int gdt_entry)
{
	_set_gate(idt_table+n,5,0,0,(gdt_entry<<3));
}

void __init trap_init(void)
{
#ifdef CONFIG_EISA
	void __iomem *p = ioremap(0x0FFFD9, 4);
	if (readl(p) == 'E'+('I'<<8)+('S'<<16)+('A'<<24)) {
		EISA_bus = 1;
	}
	iounmap(p);
#endif

#ifdef CONFIG_X86_LOCAL_APIC
	init_apic_mappings();
#endif

	set_trap_gate(0,&divide_error);
	set_intr_gate(1,&debug);
	set_intr_gate(2,&nmi);
	set_system_intr_gate(3, &int3); /* int3/4 can be called from all */
	set_system_gate(4,&overflow);
	set_trap_gate(5,&bounds);
	set_trap_gate(6,&invalid_op);
	set_trap_gate(7,&device_not_available);
	set_task_gate(8,GDT_ENTRY_DOUBLEFAULT_TSS);
	set_trap_gate(9,&coprocessor_segment_overrun);
	set_trap_gate(10,&invalid_TSS);
	set_trap_gate(11,&segment_not_present);
	set_trap_gate(12,&stack_segment);
	set_trap_gate(13,&general_protection);
	set_intr_gate(14,&page_fault);
	set_trap_gate(15,&spurious_interrupt_bug);
	set_trap_gate(16,&coprocessor_error);
	set_trap_gate(17,&alignment_check);
#ifdef CONFIG_X86_MCE
	set_trap_gate(18,&machine_check);
#endif
	set_trap_gate(19,&simd_coprocessor_error);

	if (cpu_has_fxsr) {
		/*
		 * Verify that the FXSAVE/FXRSTOR data will be 16-byte aligned.
		 * Generates a compile-time "error: zero width for bit-field" if
		 * the alignment is wrong.
		 */
		struct fxsrAlignAssert {
			int _:!(offsetof(struct task_struct,
					thread.i387.fxsave) & 15);
		};

		printk(KERN_INFO "Enabling fast FPU save and restore... ");
		set_in_cr4(X86_CR4_OSFXSR);
		printk("done.\n");
	}
	if (cpu_has_xmm) {
		printk(KERN_INFO "Enabling unmasked SIMD FPU exception "
				"support... ");
		set_in_cr4(X86_CR4_OSXMMEXCPT);
		printk("done.\n");
	}

	set_system_gate(SYSCALL_VECTOR,&system_call);

	/*
	 * Should be a barrier for any external CPU state.
	 */
	cpu_init();

	trap_init_hook();
}

static int __init kstack_setup(char *s)
{
	kstack_depth_to_print = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("kstack=", kstack_setup);
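
/*
 * Boot-time usage (for reference): passing "kstack=48" on the kernel
 * command line makes show_stack_log_lvl() above dump 48 words of stack
 * instead of the default 24.
 */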