traps_32.c

/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/utsname.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/unwind.h>
#include <linux/uaccess.h>
#include <linux/nmi.h>
#include <linux/bug.h>

#ifdef CONFIG_EISA
#include <linux/ioport.h>
#include <linux/eisa.h>
#endif

#ifdef CONFIG_MCA
#include <linux/mca.h>
#endif

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/debugreg.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/nmi.h>
#include <asm/unwind.h>
#include <asm/smp.h>
#include <asm/arch_hooks.h>
#include <linux/kdebug.h>
#include <asm/stacktrace.h>

#include <linux/module.h>

#include "mach_traps.h"

int panic_on_unrecovered_nmi;

asmlinkage int system_call(void);

/* Do we ignore FPU interrupts? */
char ignore_fpu_irq = 0;

/*
 * The IDT has to be page-aligned to simplify the Pentium
 * F0 0F bug workaround.. We have a special link segment
 * for this.
 */
struct desc_struct idt_table[256]
        __attribute__((__section__(".data.idt"))) = { {0, 0}, };

asmlinkage void divide_error(void);
asmlinkage void debug(void);
asmlinkage void nmi(void);
asmlinkage void int3(void);
asmlinkage void overflow(void);
asmlinkage void bounds(void);
asmlinkage void invalid_op(void);
asmlinkage void device_not_available(void);
asmlinkage void coprocessor_segment_overrun(void);
asmlinkage void invalid_TSS(void);
asmlinkage void segment_not_present(void);
asmlinkage void stack_segment(void);
asmlinkage void general_protection(void);
asmlinkage void page_fault(void);
asmlinkage void coprocessor_error(void);
asmlinkage void simd_coprocessor_error(void);
asmlinkage void alignment_check(void);
asmlinkage void spurious_interrupt_bug(void);
asmlinkage void machine_check(void);

int kstack_depth_to_print = 24;
static unsigned int code_bytes = 64;

static inline int valid_stack_ptr(struct thread_info *tinfo, void *p, unsigned size)
{
        return p > (void *)tinfo &&
               p <= (void *)tinfo + THREAD_SIZE - size;
}

/* The form of the top of the frame on the stack */
struct stack_frame {
        struct stack_frame *next_frame;
        unsigned long return_address;
};
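
/*
 * With CONFIG_FRAME_POINTER each function prologue executes
 * "pushl %ebp; movl %esp, %ebp", so the word at [ebp] is the caller's
 * saved ebp (next_frame) and the word at [ebp+4] is the return
 * address; struct stack_frame above simply names that layout so the
 * walker below can follow the chain of saved frame pointers.
 */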

static inline unsigned long print_context_stack(struct thread_info *tinfo,
                                unsigned long *stack, unsigned long ebp,
                                const struct stacktrace_ops *ops, void *data)
{
#ifdef CONFIG_FRAME_POINTER
        struct stack_frame *frame = (struct stack_frame *)ebp;

        while (valid_stack_ptr(tinfo, frame, sizeof(*frame))) {
                struct stack_frame *next;
                unsigned long addr;

                addr = frame->return_address;
                ops->address(data, addr);
                /*
                 * break out of recursive entries (such as
                 * end_of_stack_stop_unwind_function). Also,
                 * we can never allow a frame pointer to
                 * move downwards!
                 */
                next = frame->next_frame;
                if (next <= frame)
                        break;
                frame = next;
        }
#else
        while (valid_stack_ptr(tinfo, stack, sizeof(*stack))) {
                unsigned long addr;

                addr = *stack++;
                if (__kernel_text_address(addr))
                        ops->address(data, addr);
        }
#endif
        return ebp;
}

#define MSG(msg) ops->warning(data, msg)

void dump_trace(struct task_struct *task, struct pt_regs *regs,
                unsigned long *stack,
                const struct stacktrace_ops *ops, void *data)
{
        unsigned long ebp = 0;

        if (!task)
                task = current;

        if (!stack) {
                unsigned long dummy;
                stack = &dummy;
                if (task != current)
                        stack = (unsigned long *)task->thread.esp;
        }

#ifdef CONFIG_FRAME_POINTER
        if (!ebp) {
                if (task == current) {
                        /* Grab ebp right from our regs */
                        asm ("movl %%ebp, %0" : "=r" (ebp) : );
                } else {
                        /* ebp is the last reg pushed by switch_to */
                        ebp = *(unsigned long *) task->thread.esp;
                }
        }
#endif

        while (1) {
                struct thread_info *context;
                context = (struct thread_info *)
                        ((unsigned long)stack & (~(THREAD_SIZE - 1)));
                ebp = print_context_stack(context, stack, ebp, ops, data);
                /* Should be after the line below, but somewhere
                   in early boot context comes out corrupted and we
                   can't reference it -AK */
                if (ops->stack(data, "IRQ") < 0)
                        break;
                stack = (unsigned long *)context->previous_esp;
                if (!stack)
                        break;
                touch_nmi_watchdog();
        }
}
EXPORT_SYMBOL(dump_trace);
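
/*
 * Illustrative sketch only: since dump_trace() is exported, a caller
 * could count the kernel-text return addresses on the current stack by
 * supplying its own stacktrace_ops. The callback signatures mirror
 * print_trace_ops below; the names "count_ops" and "count_address" are
 * made up for this example and are not part of this file.
 *
 *      static void count_address(void *data, unsigned long addr)
 *      {
 *              (*(unsigned int *)data)++;
 *      }
 *      static int count_stack(void *data, char *name) { return 0; }
 *      static void count_warning(void *data, char *msg) { }
 *      static void count_warning_symbol(void *data, char *msg,
 *                                       unsigned long symbol) { }
 *
 *      static const struct stacktrace_ops count_ops = {
 *              .warning        = count_warning,
 *              .warning_symbol = count_warning_symbol,
 *              .stack          = count_stack,
 *              .address        = count_address,
 *      };
 *
 *      unsigned int depth = 0;
 *      dump_trace(current, NULL, NULL, &count_ops, &depth);
 */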

static void
print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
        printk(data);
        print_symbol(msg, symbol);
        printk("\n");
}

static void print_trace_warning(void *data, char *msg)
{
        printk("%s%s\n", (char *)data, msg);
}

static int print_trace_stack(void *data, char *name)
{
        return 0;
}

/*
 * Print one address/symbol entry per line.
 */
static void print_trace_address(void *data, unsigned long addr)
{
        printk("%s [<%08lx>] ", (char *)data, addr);
        print_symbol("%s\n", addr);
        touch_nmi_watchdog();
}

static const struct stacktrace_ops print_trace_ops = {
        .warning = print_trace_warning,
        .warning_symbol = print_trace_warning_symbol,
        .stack = print_trace_stack,
        .address = print_trace_address,
};

static void
show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
                   unsigned long *stack, char *log_lvl)
{
        dump_trace(task, regs, stack, &print_trace_ops, log_lvl);
        printk("%s =======================\n", log_lvl);
}

void show_trace(struct task_struct *task, struct pt_regs *regs,
                unsigned long *stack)
{
        show_trace_log_lvl(task, regs, stack, "");
}

static void show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
                               unsigned long *esp, char *log_lvl)
{
        unsigned long *stack;
        int i;

        if (esp == NULL) {
                if (task)
                        esp = (unsigned long *)task->thread.esp;
                else
                        esp = (unsigned long *)&esp;
        }

        stack = esp;
        for (i = 0; i < kstack_depth_to_print; i++) {
                if (kstack_end(stack))
                        break;
                if (i && ((i % 8) == 0))
                        printk("\n%s ", log_lvl);
                printk("%08lx ", *stack++);
        }
        printk("\n%sCall Trace:\n", log_lvl);
        show_trace_log_lvl(task, regs, esp, log_lvl);
}

void show_stack(struct task_struct *task, unsigned long *esp)
{
        printk(" ");
        show_stack_log_lvl(task, NULL, esp, "");
}

/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
        unsigned long stack;

        show_trace(current, NULL, &stack);
}
EXPORT_SYMBOL(dump_stack);

void show_registers(struct pt_regs *regs)
{
        int i;

        print_modules();
        __show_registers(regs, 0);
        printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)",
                TASK_COMM_LEN, current->comm, current->pid,
                current_thread_info(), current, task_thread_info(current));
        /*
         * When in-kernel, we also print out the stack and code at the
         * time of the fault..
         */
        if (!user_mode_vm(regs)) {
                u8 *eip;
                unsigned int code_prologue = code_bytes * 43 / 64;
                unsigned int code_len = code_bytes;
                unsigned char c;

                printk("\n" KERN_EMERG "Stack: ");
                show_stack_log_lvl(NULL, regs, &regs->esp, KERN_EMERG);

                printk(KERN_EMERG "Code: ");

                eip = (u8 *)regs->eip - code_prologue;
                if (eip < (u8 *)PAGE_OFFSET ||
                    probe_kernel_address(eip, c)) {
                        /* try starting at EIP */
                        eip = (u8 *)regs->eip;
                        code_len = code_len - code_prologue + 1;
                }
                for (i = 0; i < code_len; i++, eip++) {
                        if (eip < (u8 *)PAGE_OFFSET ||
                            probe_kernel_address(eip, c)) {
                                printk(" Bad EIP value.");
                                break;
                        }
                        if (eip == (u8 *)regs->eip)
                                printk("<%02x> ", c);
                        else
                                printk("%02x ", c);
                }
        }
        printk("\n");
}

int is_valid_bugaddr(unsigned long eip)
{
        unsigned short ud2;

        if (eip < PAGE_OFFSET)
                return 0;
        if (probe_kernel_address((unsigned short *)eip, ud2))
                return 0;

        return ud2 == 0x0b0f;
}
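
/*
 * Note on the constant above: BUG() compiles to the two-byte ud2
 * instruction, 0x0f 0x0b. Read from memory into an unsigned short on
 * this little-endian CPU, those bytes appear as the value 0x0b0f.
 */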

/*
 * This is gone through when something in the kernel has done something bad
 * and is about to be terminated.
 */
void die(const char *str, struct pt_regs *regs, long err)
{
        static struct {
                spinlock_t lock;
                u32 lock_owner;
                int lock_owner_depth;
        } die = {
                .lock = __SPIN_LOCK_UNLOCKED(die.lock),
                .lock_owner = -1,
                .lock_owner_depth = 0
        };
        static int die_counter;
        unsigned long flags;

        oops_enter();

        if (die.lock_owner != raw_smp_processor_id()) {
                console_verbose();
                spin_lock_irqsave(&die.lock, flags);
                die.lock_owner = smp_processor_id();
                die.lock_owner_depth = 0;
                bust_spinlocks(1);
        } else
                local_save_flags(flags);

        if (++die.lock_owner_depth < 3) {
                unsigned long esp;
                unsigned short ss;

                report_bug(regs->eip, regs);

                printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff,
                       ++die_counter);
#ifdef CONFIG_PREEMPT
                printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
                printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
                printk("DEBUG_PAGEALLOC");
#endif
                printk("\n");

                if (notify_die(DIE_OOPS, str, regs, err,
                               current->thread.trap_no, SIGSEGV) !=
                    NOTIFY_STOP) {
                        show_registers(regs);
                        /* Executive summary in case the oops scrolled away */
                        esp = (unsigned long) (&regs->esp);
                        savesegment(ss, ss);
                        if (user_mode(regs)) {
                                esp = regs->esp;
                                ss = regs->xss & 0xffff;
                        }
                        printk(KERN_EMERG "EIP: [<%08lx>] ", regs->eip);
                        print_symbol("%s", regs->eip);
                        printk(" SS:ESP %04x:%08lx\n", ss, esp);
                } else
                        regs = NULL;
        } else
                printk(KERN_EMERG "Recursive die() failure, output suppressed\n");

        bust_spinlocks(0);
        die.lock_owner = -1;
        add_taint(TAINT_DIE);
        spin_unlock_irqrestore(&die.lock, flags);

        if (!regs)
                return;

        if (kexec_should_crash(current))
                crash_kexec(regs);

        if (in_interrupt())
                panic("Fatal exception in interrupt");

        if (panic_on_oops)
                panic("Fatal exception");

        oops_exit();
        do_exit(SIGSEGV);
}

static inline void die_if_kernel(const char *str, struct pt_regs *regs, long err)
{
        if (!user_mode_vm(regs))
                die(str, regs, err);
}

static void __kprobes do_trap(int trapnr, int signr, char *str, int vm86,
                              struct pt_regs *regs, long error_code,
                              siginfo_t *info)
{
        struct task_struct *tsk = current;

        if (regs->eflags & VM_MASK) {
                if (vm86)
                        goto vm86_trap;
                goto trap_signal;
        }

        if (!user_mode(regs))
                goto kernel_trap;

trap_signal: {
                /*
                 * We want error_code and trap_no set for userspace faults and
                 * kernelspace faults which result in die(), but not
                 * kernelspace faults which are fixed up. die() gives the
                 * process no chance to handle the signal and notice the
                 * kernel fault information, so that won't result in polluting
                 * the information about previously queued, but not yet
                 * delivered, faults. See also do_general_protection below.
                 */
                tsk->thread.error_code = error_code;
                tsk->thread.trap_no = trapnr;
                if (info)
                        force_sig_info(signr, info, tsk);
                else
                        force_sig(signr, tsk);
                return;
        }

kernel_trap: {
                if (!fixup_exception(regs)) {
                        tsk->thread.error_code = error_code;
                        tsk->thread.trap_no = trapnr;
                        die(str, regs, error_code);
                }
                return;
        }

vm86_trap: {
                int ret = handle_vm86_trap((struct kernel_vm86_regs *)regs,
                                           error_code, trapnr);
                if (ret)
                        goto trap_signal;
                return;
        }
}

#define DO_ERROR(trapnr, signr, str, name)                              \
fastcall void do_##name(struct pt_regs *regs, long error_code)          \
{                                                                       \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr)  \
                        == NOTIFY_STOP)                                 \
                return;                                                 \
        do_trap(trapnr, signr, str, 0, regs, error_code, NULL);         \
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr, irq)    \
fastcall void do_##name(struct pt_regs *regs, long error_code)          \
{                                                                       \
        siginfo_t info;                                                 \
        if (irq)                                                        \
                local_irq_enable();                                     \
        info.si_signo = signr;                                          \
        info.si_errno = 0;                                              \
        info.si_code = sicode;                                          \
        info.si_addr = (void __user *)siaddr;                           \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr)  \
                        == NOTIFY_STOP)                                 \
                return;                                                 \
        do_trap(trapnr, signr, str, 0, regs, error_code, &info);        \
}

#define DO_VM86_ERROR(trapnr, signr, str, name)                         \
fastcall void do_##name(struct pt_regs *regs, long error_code)          \
{                                                                       \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr)  \
                        == NOTIFY_STOP)                                 \
                return;                                                 \
        do_trap(trapnr, signr, str, 1, regs, error_code, NULL);         \
}

#define DO_VM86_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr)    \
fastcall void do_##name(struct pt_regs *regs, long error_code)          \
{                                                                       \
        siginfo_t info;                                                 \
        info.si_signo = signr;                                          \
        info.si_errno = 0;                                              \
        info.si_code = sicode;                                          \
        info.si_addr = (void __user *)siaddr;                           \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr)  \
                        == NOTIFY_STOP)                                 \
                return;                                                 \
        do_trap(trapnr, signr, str, 1, regs, error_code, &info);        \
}

DO_VM86_ERROR_INFO( 0, SIGFPE,  "divide error", divide_error, FPE_INTDIV, regs->eip)
#ifndef CONFIG_KPROBES
DO_VM86_ERROR( 3, SIGTRAP, "int3", int3)
#endif
DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)
DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO( 6, SIGILL,  "invalid opcode", invalid_op, ILL_ILLOPN, regs->eip, 0)
DO_ERROR( 9, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS,  "segment not present", segment_not_present)
DO_ERROR(12, SIGBUS,  "stack segment", stack_segment)
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0, 0)
DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0, 1)
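
/*
 * For reference, each line above is an ordinary function definition
 * once the macro expands; e.g. DO_ERROR(10, SIGSEGV, "invalid TSS",
 * invalid_TSS) becomes:
 *
 *      fastcall void do_invalid_TSS(struct pt_regs *regs, long error_code)
 *      {
 *              if (notify_die(DIE_TRAP, "invalid TSS", regs, error_code,
 *                              10, SIGSEGV) == NOTIFY_STOP)
 *                      return;
 *              do_trap(10, SIGSEGV, "invalid TSS", 0, regs, error_code, NULL);
 *      }
 */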

fastcall void __kprobes do_general_protection(struct pt_regs *regs,
                                              long error_code)
{
        int cpu = get_cpu();
        struct tss_struct *tss = &per_cpu(init_tss, cpu);
        struct thread_struct *thread = &current->thread;

        /*
         * Perform the lazy TSS's I/O bitmap copy. If the TSS has an
         * invalid offset set (the LAZY one) and the faulting thread has
         * a valid I/O bitmap pointer, we copy the I/O bitmap in the TSS
         * and we set the offset field correctly. Then we let the CPU
         * restart the faulting instruction.
         */
        if (tss->x86_tss.io_bitmap_base == INVALID_IO_BITMAP_OFFSET_LAZY &&
            thread->io_bitmap_ptr) {
                memcpy(tss->io_bitmap, thread->io_bitmap_ptr,
                       thread->io_bitmap_max);
                /*
                 * If the previously set map extended to higher ports
                 * than the current one, pad extra space with 0xff (no access).
                 */
                if (thread->io_bitmap_max < tss->io_bitmap_max)
                        memset((char *) tss->io_bitmap +
                               thread->io_bitmap_max, 0xff,
                               tss->io_bitmap_max - thread->io_bitmap_max);
                tss->io_bitmap_max = thread->io_bitmap_max;
                tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
                tss->io_bitmap_owner = thread;
                put_cpu();
                return;
        }
        put_cpu();

        if (regs->eflags & VM_MASK)
                goto gp_in_vm86;

        if (!user_mode(regs))
                goto gp_in_kernel;

        current->thread.error_code = error_code;
        current->thread.trap_no = 13;
        if (show_unhandled_signals && unhandled_signal(current, SIGSEGV) &&
            printk_ratelimit())
                printk(KERN_INFO
                       "%s[%d] general protection eip:%lx esp:%lx error:%lx\n",
                       current->comm, current->pid,
                       regs->eip, regs->esp, error_code);

        force_sig(SIGSEGV, current);
        return;

gp_in_vm86:
        local_irq_enable();
        handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
        return;

gp_in_kernel:
        if (!fixup_exception(regs)) {
                current->thread.error_code = error_code;
                current->thread.trap_no = 13;
                if (notify_die(DIE_GPF, "general protection fault", regs,
                               error_code, 13, SIGSEGV) == NOTIFY_STOP)
                        return;
                die("general protection fault", regs, error_code);
        }
}

static __kprobes void
mem_parity_error(unsigned char reason, struct pt_regs *regs)
{
        printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x on "
                "CPU %d.\n", reason, smp_processor_id());
        printk(KERN_EMERG "You have some hardware problem, likely on the PCI bus.\n");

#if defined(CONFIG_EDAC)
        if (edac_handler_set()) {
                edac_atomic_assert_error();
                return;
        }
#endif

        if (panic_on_unrecovered_nmi)
                panic("NMI: Not continuing");

        printk(KERN_EMERG "Dazed and confused, but trying to continue\n");

        /* Clear and disable the memory parity error line. */
        clear_mem_error(reason);
}

static __kprobes void
io_check_error(unsigned char reason, struct pt_regs *regs)
{
        unsigned long i;

        printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
        show_registers(regs);

        /* Re-enable the IOCK line, wait for a few seconds */
        reason = (reason & 0xf) | 8;
        outb(reason, 0x61);
        i = 2000;
        while (--i)
                udelay(1000);
        reason &= ~8;
        outb(reason, 0x61);
}

static __kprobes void
unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
{
#ifdef CONFIG_MCA
        /* Might actually be able to figure out what the guilty party
         * is. */
        if (MCA_bus) {
                mca_handle_nmi();
                return;
        }
#endif
        printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x on "
                "CPU %d.\n", reason, smp_processor_id());
        printk(KERN_EMERG "Do you have a strange power saving mode enabled?\n");
        if (panic_on_unrecovered_nmi)
                panic("NMI: Not continuing");

        printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
}

static DEFINE_SPINLOCK(nmi_print_lock);

void __kprobes die_nmi(struct pt_regs *regs, const char *msg)
{
        if (notify_die(DIE_NMIWATCHDOG, msg, regs, 0, 2, SIGINT) ==
            NOTIFY_STOP)
                return;

        spin_lock(&nmi_print_lock);
        /*
         * We are in trouble anyway, let's at least try
         * to get a message out.
         */
        bust_spinlocks(1);
        printk(KERN_EMERG "%s", msg);
        printk(" on CPU%d, eip %08lx, registers:\n",
               smp_processor_id(), regs->eip);
        show_registers(regs);
        console_silent();
        spin_unlock(&nmi_print_lock);
        bust_spinlocks(0);

        /* If we are in kernel we are probably nested up pretty bad
         * and might as well get out now while we still can.
         */
        if (!user_mode_vm(regs)) {
                current->thread.trap_no = 2;
                crash_kexec(regs);
        }

        do_exit(SIGSEGV);
}
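
/*
 * Background for the 0x80/0x40 tests below (on the default machine
 * type): get_nmi_reason() reads system control port B (I/O port 0x61),
 * where a set bit 7 means a latched memory parity / channel-check
 * error and a set bit 6 means an I/O check (IOCHK) error.
 */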
static __kprobes void default_do_nmi(struct pt_regs *regs)
{
        unsigned char reason = 0;

        /* Only the BSP gets external NMIs from the system. */
        if (!smp_processor_id())
                reason = get_nmi_reason();

        if (!(reason & 0xc0)) {
                if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
                    == NOTIFY_STOP)
                        return;
#ifdef CONFIG_X86_LOCAL_APIC
                /*
                 * Ok, so this is none of the documented NMI sources,
                 * so it must be the NMI watchdog.
                 */
                if (nmi_watchdog_tick(regs, reason))
                        return;
                if (!do_nmi_callback(regs, smp_processor_id()))
#endif
                        unknown_nmi_error(reason, regs);

                return;
        }
        if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
                return;
        if (reason & 0x80)
                mem_parity_error(reason, regs);
        if (reason & 0x40)
                io_check_error(reason, regs);
        /*
         * Reassert NMI in case it became active meanwhile
         * as it's edge-triggered.
         */
        reassert_nmi();
}

static int ignore_nmis;

fastcall __kprobes void do_nmi(struct pt_regs *regs, long error_code)
{
        int cpu;

        nmi_enter();

        cpu = smp_processor_id();

        ++nmi_count(cpu);

        if (!ignore_nmis)
                default_do_nmi(regs);

        nmi_exit();
}

void stop_nmi(void)
{
        acpi_nmi_disable();
        ignore_nmis++;
}

void restart_nmi(void)
{
        ignore_nmis--;
        acpi_nmi_enable();
}

#ifdef CONFIG_KPROBES
fastcall void __kprobes do_int3(struct pt_regs *regs, long error_code)
{
        if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
            == NOTIFY_STOP)
                return;
        /* This is an interrupt gate, because kprobes wants interrupts
           disabled. Normal trap handlers don't. */
        restore_interrupts(regs);
        do_trap(3, SIGTRAP, "int3", 1, regs, error_code, NULL);
}
#endif

/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 */
fastcall void __kprobes do_debug(struct pt_regs *regs, long error_code)
{
        unsigned int condition;
        struct task_struct *tsk = current;

        get_debugreg(condition, 6);

        if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
                       SIGTRAP) == NOTIFY_STOP)
                return;
        /* It's safe to allow IRQs after DR6 has been saved */
        if (regs->eflags & X86_EFLAGS_IF)
                local_irq_enable();

        /* Mask out spurious debug traps due to lazy DR7 setting */
        if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
                if (!tsk->thread.debugreg[7])
                        goto clear_dr7;
        }

        if (regs->eflags & VM_MASK)
                goto debug_vm86;

        /* Save debug status register where ptrace can see it */
        tsk->thread.debugreg[6] = condition;

        /*
         * Single-stepping through TF: make sure we ignore any events in
         * kernel space (but re-enable TF when returning to user mode).
         */
        if (condition & DR_STEP) {
                /*
                 * We already checked v86 mode above, so we can
                 * check for kernel mode by just checking the CPL
                 * of CS.
                 */
                if (!user_mode(regs))
                        goto clear_TF_reenable;
        }

        /* Ok, finally something we can handle */
        send_sigtrap(tsk, regs, error_code);

        /* Disable additional traps. They'll be re-enabled when
         * the signal is delivered.
         */
clear_dr7:
        set_debugreg(0, 7);
        return;

debug_vm86:
        handle_vm86_trap((struct kernel_vm86_regs *)regs, error_code, 1);
        return;

clear_TF_reenable:
        set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
        regs->eflags &= ~TF_MASK;
        return;
}

/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
void math_error(void __user *eip)
{
        struct task_struct *task;
        siginfo_t info;
        unsigned short cwd, swd;

        /*
         * Save the info for the exception handler and clear the error.
         */
        task = current;
        save_init_fpu(task);
        task->thread.trap_no = 16;
        task->thread.error_code = 0;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_code = __SI_FAULT;
        info.si_addr = eip;
        /*
         * (~cwd & swd) will mask out exceptions that are not set to unmasked
         * status. 0x3f is the exception bits in these regs, 0x200 is the
         * C1 reg you need in case of a stack fault, 0x040 is the stack
         * fault bit. We should only be taking one exception at a time,
         * so if this combination doesn't produce any single exception,
         * then we have a bad program that isn't synchronizing its FPU usage
         * and it will suffer the consequences since we won't be able to
         * fully reproduce the context of the exception
         */
        cwd = get_fpu_cwd(task);
        swd = get_fpu_swd(task);
        switch (swd & ~cwd & 0x3f) {
        case 0x000: /* No unmasked exception */
                return;
        default:    /* Multiple exceptions */
                break;
        case 0x001: /* Invalid Op */
                /*
                 * swd & 0x240 == 0x040: Stack Underflow
                 * swd & 0x240 == 0x240: Stack Overflow
                 * User must clear the SF bit (0x40) if set
                 */
                info.si_code = FPE_FLTINV;
                break;
        case 0x002: /* Denormalize */
        case 0x010: /* Underflow */
                info.si_code = FPE_FLTUND;
                break;
        case 0x004: /* Zero Divide */
                info.si_code = FPE_FLTDIV;
                break;
        case 0x008: /* Overflow */
                info.si_code = FPE_FLTOVF;
                break;
        case 0x020: /* Precision */
                info.si_code = FPE_FLTRES;
                break;
        }
        force_sig_info(SIGFPE, &info, task);
}
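
/*
 * Worked example of the (swd & ~cwd & 0x3f) test above, assuming an
 * otherwise default FPU state: the default control word (0x037f)
 * masks all six exceptions, so ~cwd & 0x3f is zero and a masked fault
 * never reaches the switch. If a program unmasks divide-by-zero by
 * clearing bit 2 of the control word and then divides by zero, bit 2
 * of the status word is set, the expression evaluates to 0x004, and
 * the SIGFPE is queued with si_code FPE_FLTDIV.
 */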

fastcall void do_coprocessor_error(struct pt_regs *regs, long error_code)
{
        ignore_fpu_irq = 1;
        math_error((void __user *)regs->eip);
}

static void simd_math_error(void __user *eip)
{
        struct task_struct *task;
        siginfo_t info;
        unsigned short mxcsr;

        /*
         * Save the info for the exception handler and clear the error.
         */
        task = current;
        save_init_fpu(task);
        task->thread.trap_no = 19;
        task->thread.error_code = 0;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_code = __SI_FAULT;
        info.si_addr = eip;
        /*
         * The SIMD FPU exceptions are handled a little differently, as there
         * is only a single status/control register. Thus, to determine which
         * unmasked exception was caught we must mask the exception mask bits
         * at 0x1f80, and then use these to mask the exception bits at 0x3f.
         */
        mxcsr = get_fpu_mxcsr(task);
        switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
        case 0x000:
        default:
                break;
        case 0x001: /* Invalid Op */
                info.si_code = FPE_FLTINV;
                break;
        case 0x002: /* Denormalize */
        case 0x010: /* Underflow */
                info.si_code = FPE_FLTUND;
                break;
        case 0x004: /* Zero Divide */
                info.si_code = FPE_FLTDIV;
                break;
        case 0x008: /* Overflow */
                info.si_code = FPE_FLTOVF;
                break;
        case 0x020: /* Precision */
                info.si_code = FPE_FLTRES;
                break;
        }
        force_sig_info(SIGFPE, &info, task);
}
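
/*
 * The same idea as math_error(), worked through for MXCSR, assuming an
 * otherwise default register: the reset value 0x1f80 masks all six
 * SIMD exceptions, so the computed expression is zero. Clearing the
 * divide-by-zero mask (bit 9) and then performing a SIMD divide by
 * zero sets flag bit 2, the expression yields 0x004, and si_code is
 * set to FPE_FLTDIV.
 */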

fastcall void do_simd_coprocessor_error(struct pt_regs *regs,
                                        long error_code)
{
        if (cpu_has_xmm) {
                /* Handle SIMD FPU exceptions on PIII+ processors. */
                ignore_fpu_irq = 1;
                simd_math_error((void __user *)regs->eip);
        } else {
                /*
                 * Handle strange cache flush from user space exception
                 * in all other cases. This is undocumented behaviour.
                 */
                if (regs->eflags & VM_MASK) {
                        handle_vm86_fault((struct kernel_vm86_regs *)regs,
                                          error_code);
                        return;
                }
                current->thread.trap_no = 19;
                current->thread.error_code = error_code;
                die_if_kernel("cache flush denied", regs, error_code);
                force_sig(SIGSEGV, current);
        }
}

fastcall void do_spurious_interrupt_bug(struct pt_regs *regs,
                                        long error_code)
{
#if 0
        /* No need to warn about this any longer. */
        printk("Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
#endif
}

fastcall unsigned long patch_espfix_desc(unsigned long uesp,
                                         unsigned long kesp)
{
        struct desc_struct *gdt = __get_cpu_var(gdt_page).gdt;
        unsigned long base = (kesp - uesp) & -THREAD_SIZE;
        unsigned long new_kesp = kesp - base;
        unsigned long lim_pages = (new_kesp | (THREAD_SIZE - 1)) >> PAGE_SHIFT;
        __u64 desc = *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS];

        /* Set up base for espfix segment */
        desc &= 0x00f0ff0000000000ULL;
        desc |= ((((__u64)base) << 16) & 0x000000ffffff0000ULL) |
                ((((__u64)base) << 32) & 0xff00000000000000ULL) |
                ((((__u64)lim_pages) << 32) & 0x000f000000000000ULL) |
                (lim_pages & 0xffff);
        *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS] = desc;

        return new_kesp;
}
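
/*
 * The bit gymnastics above follow the standard x86 segment-descriptor
 * layout: within the 64-bit descriptor, limit bits 15..0 live in
 * descriptor bits 15..0 and limit bits 19..16 in bits 51..48, while
 * base bits 23..0 live in bits 39..16 and base bits 31..24 in bits
 * 63..56. The 0x00f0ff0000000000 mask preserves the access byte and
 * flag nibble and the OR splices in the new base and the page-granular
 * limit.
 */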

/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 *
 * Must be called with kernel preemption disabled (in this case,
 * local interrupts are disabled at the call-site in entry.S).
 */
asmlinkage void math_state_restore(void)
{
        struct thread_info *thread = current_thread_info();
        struct task_struct *tsk = thread->task;

        clts();         /* Allow maths ops (or we recurse) */
        if (!tsk_used_math(tsk))
                init_fpu(tsk);
        restore_fpu(tsk);
        thread->status |= TS_USEDFPU;   /* So we fnsave on switch_to() */
        tsk->fpu_counter++;
}
EXPORT_SYMBOL_GPL(math_state_restore);

#ifndef CONFIG_MATH_EMULATION

asmlinkage void math_emulate(long arg)
{
        printk(KERN_EMERG "math-emulation not enabled and no coprocessor found.\n");
        printk(KERN_EMERG "killing %s.\n", current->comm);
        force_sig(SIGFPE, current);
        schedule();
}

#endif /* CONFIG_MATH_EMULATION */

/*
 * This needs to use 'idt_table' rather than 'idt', and
 * thus use the _nonmapped_ version of the IDT, as the
 * Pentium F0 0F bugfix can have resulted in the mapped
 * IDT being write-protected.
 */
void set_intr_gate(unsigned int n, void *addr)
{
        _set_gate(n, DESCTYPE_INT, addr, __KERNEL_CS);
}

/*
 * This routine sets up an interrupt gate at descriptor privilege level 3.
 */
static inline void set_system_intr_gate(unsigned int n, void *addr)
{
        _set_gate(n, DESCTYPE_INT | DESCTYPE_DPL3, addr, __KERNEL_CS);
}

static void __init set_trap_gate(unsigned int n, void *addr)
{
        _set_gate(n, DESCTYPE_TRAP, addr, __KERNEL_CS);
}

static void __init set_system_gate(unsigned int n, void *addr)
{
        _set_gate(n, DESCTYPE_TRAP | DESCTYPE_DPL3, addr, __KERNEL_CS);
}

static void __init set_task_gate(unsigned int n, unsigned int gdt_entry)
{
        _set_gate(n, DESCTYPE_TASK, (void *)0, (gdt_entry << 3));
}
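
/*
 * A quick map of the gate flavours used below: interrupt gates clear
 * EFLAGS.IF on entry, trap gates leave it unchanged, and a task gate
 * switches to a separate TSS (used for the double-fault handler so it
 * runs on a known-good stack). The DPL3 variants additionally allow
 * the vector to be raised from user mode with an explicit "int"
 * instruction.
 */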

void __init trap_init(void)
{
#ifdef CONFIG_EISA
        void __iomem *p = ioremap(0x0FFFD9, 4);
        if (readl(p) == 'E' + ('I' << 8) + ('S' << 16) + ('A' << 24)) {
                EISA_bus = 1;
        }
        iounmap(p);
#endif

#ifdef CONFIG_X86_LOCAL_APIC
        init_apic_mappings();
#endif

        set_trap_gate(0, &divide_error);
        set_intr_gate(1, &debug);
        set_intr_gate(2, &nmi);
        set_system_intr_gate(3, &int3); /* int3/4 can be called from all */
        set_system_gate(4, &overflow);
        set_trap_gate(5, &bounds);
        set_trap_gate(6, &invalid_op);
        set_trap_gate(7, &device_not_available);
        set_task_gate(8, GDT_ENTRY_DOUBLEFAULT_TSS);
        set_trap_gate(9, &coprocessor_segment_overrun);
        set_trap_gate(10, &invalid_TSS);
        set_trap_gate(11, &segment_not_present);
        set_trap_gate(12, &stack_segment);
        set_trap_gate(13, &general_protection);
        set_intr_gate(14, &page_fault);
        set_trap_gate(15, &spurious_interrupt_bug);
        set_trap_gate(16, &coprocessor_error);
        set_trap_gate(17, &alignment_check);
#ifdef CONFIG_X86_MCE
        set_trap_gate(18, &machine_check);
#endif
        set_trap_gate(19, &simd_coprocessor_error);

        if (cpu_has_fxsr) {
                /*
                 * Verify that the FXSAVE/FXRSTOR data will be 16-byte aligned.
                 * Generates a compile-time "error: zero width for bit-field" if
                 * the alignment is wrong.
                 */
                struct fxsrAlignAssert {
                        int _:!(offsetof(struct task_struct,
                                         thread.i387.fxsave) & 15);
                };

                printk(KERN_INFO "Enabling fast FPU save and restore... ");
                set_in_cr4(X86_CR4_OSFXSR);
                printk("done.\n");
        }

        if (cpu_has_xmm) {
                printk(KERN_INFO "Enabling unmasked SIMD FPU exception "
                       "support... ");
                set_in_cr4(X86_CR4_OSXMMEXCPT);
                printk("done.\n");
        }

        set_system_gate(SYSCALL_VECTOR, &system_call);

        /*
         * Should be a barrier for any external CPU state.
         */
        cpu_init();

        trap_init_hook();
}

static int __init kstack_setup(char *s)
{
        kstack_depth_to_print = simple_strtoul(s, NULL, 0);
        return 1;
}
__setup("kstack=", kstack_setup);

static int __init code_bytes_setup(char *s)
{
        code_bytes = simple_strtoul(s, NULL, 0);
        if (code_bytes > 8192)
                code_bytes = 8192;
        return 1;
}
__setup("code_bytes=", code_bytes_setup);
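
/*
 * Both knobs above are boot command-line parameters; for example,
 * booting with "kstack=48 code_bytes=128" prints 48 stack words per
 * trace and dumps 128 bytes of code around EIP in show_registers().
 */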