/*
 *  linux/arch/x86-64/traps.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 *  Pentium III FXSR, SSE support
 *      Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'entry.S'.
 */
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/nmi.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/unwind.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/debugreg.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/kdebug.h>
#include <asm/processor.h>
#include <asm/unwind.h>
#include <asm/smp.h>
#include <asm/pgalloc.h>
#include <asm/pda.h>
#include <asm/proto.h>
#include <asm/nmi.h>

asmlinkage void divide_error(void);
asmlinkage void debug(void);
asmlinkage void nmi(void);
asmlinkage void int3(void);
asmlinkage void overflow(void);
asmlinkage void bounds(void);
asmlinkage void invalid_op(void);
asmlinkage void device_not_available(void);
asmlinkage void double_fault(void);
asmlinkage void coprocessor_segment_overrun(void);
asmlinkage void invalid_TSS(void);
asmlinkage void segment_not_present(void);
asmlinkage void stack_segment(void);
asmlinkage void general_protection(void);
asmlinkage void page_fault(void);
asmlinkage void coprocessor_error(void);
asmlinkage void simd_coprocessor_error(void);
asmlinkage void reserved(void);
asmlinkage void alignment_check(void);
asmlinkage void machine_check(void);
asmlinkage void spurious_interrupt_bug(void);
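/*
 * The declarations above are the low-level entry stubs defined in entry.S;
 * each one saves register state and then calls the corresponding do_*() C
 * handler defined below.
 */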
ATOMIC_NOTIFIER_HEAD(die_chain);

int register_die_notifier(struct notifier_block *nb)
{
        vmalloc_sync_all();
        return atomic_notifier_chain_register(&die_chain, nb);
}
EXPORT_SYMBOL(register_die_notifier);

int unregister_die_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_unregister(&die_chain, nb);
}
EXPORT_SYMBOL(unregister_die_notifier);
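/*
 * Illustrative sketch (not part of the original file): a debugging module
 * might hook the die chain like this.  The chain is atomic, so the callback
 * runs in exception context and must not sleep.
 */
#if 0
static int example_die_handler(struct notifier_block *nb, unsigned long val,
                               void *data)
{
        struct die_args *args = data;

        if (val == DIE_OOPS)
                printk(KERN_ERR "example: oops at RIP %016lx\n",
                       args->regs->rip);
        return NOTIFY_DONE;
}

static struct notifier_block example_die_nb = {
        .notifier_call = example_die_handler,
};
/* ... call register_die_notifier(&example_die_nb) from module init ... */
#endif

/*
 * Re-enable interrupts only if they were enabled in the context that
 * trapped; the IF flag saved in regs->eflags tells us which case applies.
 */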
static inline void conditional_sti(struct pt_regs *regs)
{
        if (regs->eflags & X86_EFLAGS_IF)
                local_irq_enable();
}

static inline void preempt_conditional_sti(struct pt_regs *regs)
{
        preempt_disable();
        if (regs->eflags & X86_EFLAGS_IF)
                local_irq_enable();
}

static inline void preempt_conditional_cli(struct pt_regs *regs)
{
        if (regs->eflags & X86_EFLAGS_IF)
                local_irq_disable();
        /* Make sure to not schedule here because we could be running
           on an exception stack. */
        preempt_enable_no_resched();
}

static int kstack_depth_to_print = 10;

#ifdef CONFIG_KALLSYMS
#include <linux/kallsyms.h>
int printk_address(unsigned long address)
{
        unsigned long offset = 0, symsize;
        const char *symname;
        char *modname;
        char *delim = ":";
        char namebuf[128];

        symname = kallsyms_lookup(address, &symsize, &offset, &modname, namebuf);
        if (!symname)
                return printk("[<%016lx>]", address);
        if (!modname)
                modname = delim = "";
        return printk("<%016lx>{%s%s%s%s%+ld}",
                      address, delim, modname, delim, symname, offset);
}
#else
int printk_address(unsigned long address)
{
        return printk("[<%016lx>]", address);
}
#endif
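/*
 * Decide whether 'stack' points into one of this CPU's IST exception
 * stacks.  If it does, mark that stack as visited in *usedp (so a corrupt
 * frame cannot send the unwinder around the same stack twice), report its
 * name through *idp and return a pointer to its top; otherwise return NULL.
 */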
static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
                                         unsigned *usedp, const char **idp)
{
        static char ids[][8] = {
                [DEBUG_STACK - 1] = "#DB",
                [NMI_STACK - 1] = "NMI",
                [DOUBLEFAULT_STACK - 1] = "#DF",
                [STACKFAULT_STACK - 1] = "#SS",
                [MCE_STACK - 1] = "#MC",
#if DEBUG_STKSZ > EXCEPTION_STKSZ
                [N_EXCEPTION_STACKS ... N_EXCEPTION_STACKS + DEBUG_STKSZ / EXCEPTION_STKSZ - 2] = "#DB[?]"
#endif
        };
        unsigned k;

        for (k = 0; k < N_EXCEPTION_STACKS; k++) {
                unsigned long end;

                switch (k + 1) {
#if DEBUG_STKSZ > EXCEPTION_STKSZ
                case DEBUG_STACK:
                        end = cpu_pda(cpu)->debugstack + DEBUG_STKSZ;
                        break;
#endif
                default:
                        end = per_cpu(init_tss, cpu).ist[k];
                        break;
                }
                if (stack >= end)
                        continue;
                if (stack >= end - EXCEPTION_STKSZ) {
                        if (*usedp & (1U << k))
                                break;
                        *usedp |= 1U << k;
                        *idp = ids[k];
                        return (unsigned long *)end;
                }
#if DEBUG_STKSZ > EXCEPTION_STKSZ
                if (k == DEBUG_STACK - 1 && stack >= end - DEBUG_STKSZ) {
                        unsigned j = N_EXCEPTION_STACKS - 1;

                        do {
                                ++j;
                                end -= EXCEPTION_STKSZ;
                                ids[j][4] = '1' + (j - N_EXCEPTION_STACKS);
                        } while (stack < end - EXCEPTION_STKSZ);
                        if (*usedp & (1U << j))
                                break;
                        *usedp |= 1U << j;
                        *idp = ids[j];
                        return (unsigned long *)end;
                }
#endif
        }
        return NULL;
}

static void show_trace_unwind(struct unwind_frame_info *info, void *context)
{
        int i = 11;

        while (unwind(info) == 0 && UNW_PC(info)) {
                if (i > 50) {
                        printk("\n       ");
                        i = 7;
                } else
                        i += printk(" ");
                i += printk_address(UNW_PC(info));
                if (arch_unw_user_mode(info))
                        break;
        }
        printk("\n");
}
/*
 * x86-64 can have up to three kernel stacks:
 * process stack
 * interrupt stack
 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
 */
void show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long *stack)
{
        const unsigned cpu = safe_smp_processor_id();
        unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr;
        int i;
        unsigned used = 0;
        struct unwind_frame_info info;

        printk("\nCall Trace:");

        if (!tsk)
                tsk = current;

        if (regs) {
                if (unwind_init_frame_info(&info, tsk, regs) == 0) {
                        show_trace_unwind(&info, NULL);
                        return;
                }
        } else if (tsk == current) {
                if (unwind_init_running(&info, show_trace_unwind, NULL) == 0)
                        return;
        } else {
                if (unwind_init_blocked(&info, tsk) == 0) {
                        show_trace_unwind(&info, NULL);
                        return;
                }
        }

#define HANDLE_STACK(cond) \
        do while (cond) { \
                unsigned long addr = *stack++; \
                if (kernel_text_address(addr)) { \
                        if (i > 50) { \
                                printk("\n       "); \
                                i = 0; \
                        } \
                        else \
                                i += printk(" "); \
                        /* \
                         * If the address is either in the text segment of the \
                         * kernel, or in the region which contains vmalloc'ed \
                         * memory, it *may* be the address of a calling \
                         * routine; if so, print it so that someone tracing \
                         * down the cause of the crash will be able to figure \
                         * out the call path that was taken. \
                         */ \
                        i += printk_address(addr); \
                } \
        } while (0)

        for (i = 11; ; ) {
                const char *id;
                unsigned long *estack_end;

                estack_end = in_exception_stack(cpu, (unsigned long)stack,
                                                &used, &id);
                if (estack_end) {
                        i += printk(" <%s>", id);
                        HANDLE_STACK(stack < estack_end);
                        i += printk(" <EOE>");
                        stack = (unsigned long *)estack_end[-2];
                        continue;
                }
                if (irqstack_end) {
                        unsigned long *irqstack;
                        irqstack = irqstack_end -
                                (IRQSTACKSIZE - 64) / sizeof(*irqstack);

                        if (stack >= irqstack && stack < irqstack_end) {
                                i += printk(" <IRQ>");
                                HANDLE_STACK(stack < irqstack_end);
                                stack = (unsigned long *)(irqstack_end[-1]);
                                irqstack_end = NULL;
                                i += printk(" <EOI>");
                                continue;
                        }
                }
                break;
        }

        HANDLE_STACK(((long)stack & (THREAD_SIZE - 1)) != 0);
#undef HANDLE_STACK

        printk("\n");
}

static void _show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long *rsp)
{
        unsigned long *stack;
        int i;
        const int cpu = safe_smp_processor_id();
        unsigned long *irqstack_end = (unsigned long *)(cpu_pda(cpu)->irqstackptr);
        unsigned long *irqstack = (unsigned long *)(cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE);

        // debugging aid: "show_stack(NULL, NULL);" prints the
        // back trace for this cpu.

        if (rsp == NULL) {
                if (tsk)
                        rsp = (unsigned long *)tsk->thread.rsp;
                else
                        rsp = (unsigned long *)&rsp;
        }

        stack = rsp;
        for (i = 0; i < kstack_depth_to_print; i++) {
                if (stack >= irqstack && stack <= irqstack_end) {
                        if (stack == irqstack_end) {
                                stack = (unsigned long *)(irqstack_end[-1]);
                                printk(" <EOI> ");
                        }
                } else {
                        if (((long)stack & (THREAD_SIZE - 1)) == 0)
                                break;
                }
                if (i && ((i % 4) == 0))
                        printk("\n       ");
                printk("%016lx ", *stack++);
                touch_nmi_watchdog();
        }
        show_trace(tsk, regs, rsp);
}

void show_stack(struct task_struct *tsk, unsigned long *rsp)
{
        _show_stack(tsk, NULL, rsp);
}

/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
        unsigned long dummy;
        show_trace(NULL, NULL, &dummy);
}
EXPORT_SYMBOL(dump_stack);
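/*
 * Usage note (illustrative): any kernel code may call dump_stack() to log
 * the current call chain via printk(), e.g. while tracking down how an
 * unexpected code path was reached.
 */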
void show_registers(struct pt_regs *regs)
{
        int i;
        int in_kernel = !user_mode(regs);
        unsigned long rsp;
        const int cpu = safe_smp_processor_id();
        struct task_struct *cur = cpu_pda(cpu)->pcurrent;

        rsp = regs->rsp;

        printk("CPU %d ", cpu);
        __show_regs(regs);
        printk("Process %s (pid: %d, threadinfo %p, task %p)\n",
                cur->comm, cur->pid, task_thread_info(cur), cur);

        /*
         * When in-kernel, we also print out the stack and code at the
         * time of the fault..
         */
        if (in_kernel) {
                printk("Stack: ");
                _show_stack(NULL, regs, (unsigned long *)rsp);

                printk("\nCode: ");
                if (regs->rip < PAGE_OFFSET)
                        goto bad;

                for (i = 0; i < 20; i++) {
                        unsigned char c;
                        if (__get_user(c, &((unsigned char *)regs->rip)[i])) {
bad:
                                printk(" Bad RIP value.");
                                break;
                        }
                        printk("%02x ", c);
                }
        }
        printk("\n");
}

void handle_BUG(struct pt_regs *regs)
{
        struct bug_frame f;
        long len;
        const char *prefix = "";

        if (user_mode(regs))
                return;
        if (__copy_from_user(&f, (const void __user *)regs->rip,
                             sizeof(struct bug_frame)))
                return;
        if (f.filename >= 0 ||
            f.ud2[0] != 0x0f || f.ud2[1] != 0x0b)
                return;
        len = __strnlen_user((char *)(long)f.filename, PATH_MAX) - 1;
        if (len < 0 || len >= PATH_MAX)
                f.filename = (int)(long)"unmapped filename";
        else if (len > 50) {
                f.filename += len - 50;
                prefix = "...";
        }
        printk("----------- [cut here ] --------- [please bite here ] ---------\n");
        printk(KERN_ALERT "Kernel BUG at %s%.50s:%d\n", prefix, (char *)(long)f.filename, f.line);
}

#ifdef CONFIG_BUG
void out_of_line_bug(void)
{
        BUG();
}
#endif
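/*
 * Oops locking protocol: oops_begin() takes die_lock with interrupts off
 * and tracks nesting, so an oops raised while another CPU (or a nested
 * exception on this CPU) is already oopsing neither interleaves its output
 * nor deadlocks; oops_end() releases the lock only when the nest count
 * drops back to zero.
 */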
static DEFINE_SPINLOCK(die_lock);
static int die_owner = -1;
static unsigned int die_nest_count;

unsigned __kprobes long oops_begin(void)
{
        int cpu = safe_smp_processor_id();
        unsigned long flags;

        /* racy, but better than risking deadlock. */
        local_irq_save(flags);
        if (!spin_trylock(&die_lock)) {
                if (cpu == die_owner)
                        /* nested oops. should stop eventually */;
                else
                        spin_lock(&die_lock);
        }
        die_nest_count++;
        die_owner = cpu;
        console_verbose();
        bust_spinlocks(1);
        return flags;
}

void __kprobes oops_end(unsigned long flags)
{
        die_owner = -1;
        bust_spinlocks(0);
        die_nest_count--;
        if (die_nest_count)
                /* We still own the lock */
                local_irq_restore(flags);
        else
                /* Nest count reaches zero, release the lock. */
                spin_unlock_irqrestore(&die_lock, flags);
        if (panic_on_oops)
                panic("Oops");
}

void __kprobes __die(const char *str, struct pt_regs *regs, long err)
{
        static int die_counter;

        printk(KERN_EMERG "%s: %04lx [%u] ", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
        printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
        printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
        printk("DEBUG_PAGEALLOC");
#endif
        printk("\n");
        notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV);
        show_registers(regs);
        /* Executive summary in case the oops scrolled away */
        printk(KERN_ALERT "RIP ");
        printk_address(regs->rip);
        printk(" RSP <%016lx>\n", regs->rsp);
        if (kexec_should_crash(current))
                crash_kexec(regs);
}

void die(const char *str, struct pt_regs *regs, long err)
{
        unsigned long flags = oops_begin();

        handle_BUG(regs);
        __die(str, regs, err);
        oops_end(flags);
        do_exit(SIGSEGV);
}
void __kprobes die_nmi(char *str, struct pt_regs *regs)
{
        unsigned long flags = oops_begin();

        /*
         * We are in trouble anyway, let's at least try
         * to get a message out.
         */
        printk(str, safe_smp_processor_id());
        show_registers(regs);
        if (kexec_should_crash(current))
                crash_kexec(regs);
        if (panic_on_timeout || panic_on_oops)
                panic("nmi watchdog");
        printk("console shuts up ...\n");
        oops_end(flags);
        nmi_exit();
        local_irq_enable();
        do_exit(SIGSEGV);
}
static void __kprobes do_trap(int trapnr, int signr, char *str,
                              struct pt_regs *regs, long error_code,
                              siginfo_t *info)
{
        struct task_struct *tsk = current;

        tsk->thread.error_code = error_code;
        tsk->thread.trap_no = trapnr;

        if (user_mode(regs)) {
                if (exception_trace && unhandled_signal(tsk, signr))
                        printk(KERN_INFO
                               "%s[%d] trap %s rip:%lx rsp:%lx error:%lx\n",
                               tsk->comm, tsk->pid, str,
                               regs->rip, regs->rsp, error_code);

                if (info)
                        force_sig_info(signr, info, tsk);
                else
                        force_sig(signr, tsk);
                return;
        }

        /* kernel trap */
        {
                const struct exception_table_entry *fixup;
                fixup = search_exception_tables(regs->rip);
                if (fixup)
                        regs->rip = fixup->fixup;
                else
                        die(str, regs, error_code);
                return;
        }
}

#define DO_ERROR(trapnr, signr, str, name) \
asmlinkage void do_##name(struct pt_regs *regs, long error_code) \
{ \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
                                                        == NOTIFY_STOP) \
                return; \
        conditional_sti(regs); \
        do_trap(trapnr, signr, str, regs, error_code, NULL); \
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
asmlinkage void do_##name(struct pt_regs *regs, long error_code) \
{ \
        siginfo_t info; \
        info.si_signo = signr; \
        info.si_errno = 0; \
        info.si_code = sicode; \
        info.si_addr = (void __user *)siaddr; \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
                                                        == NOTIFY_STOP) \
                return; \
        conditional_sti(regs); \
        do_trap(trapnr, signr, str, regs, error_code, &info); \
}
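/*
 * Each line below stamps out a complete handler.  For example,
 * DO_ERROR(4, SIGSEGV, "overflow", overflow) expands to an asmlinkage
 * do_overflow() that runs the die-chain notifiers, re-enables interrupts
 * if the trapped context had them on, and delivers SIGSEGV through
 * do_trap(); trap_init() wires it to vector 4.
 */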
DO_ERROR_INFO( 0, SIGFPE,  "divide error", divide_error, FPE_INTDIV, regs->rip)
DO_ERROR( 4, SIGSEGV, "overflow", overflow)
DO_ERROR( 5, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO( 6, SIGILL,  "invalid opcode", invalid_op, ILL_ILLOPN, regs->rip)
DO_ERROR( 7, SIGSEGV, "device not available", device_not_available)
DO_ERROR( 9, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS,  "segment not present", segment_not_present)
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
DO_ERROR(18, SIGSEGV, "reserved", reserved)

/* Runs on IST stack */
asmlinkage void do_stack_segment(struct pt_regs *regs, long error_code)
{
        if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
                        12, SIGBUS) == NOTIFY_STOP)
                return;
        preempt_conditional_sti(regs);
        do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL);
        preempt_conditional_cli(regs);
}
asmlinkage void do_double_fault(struct pt_regs *regs, long error_code)
{
        static const char str[] = "double fault";
        struct task_struct *tsk = current;

        /* Return not checked because a double fault cannot be ignored */
        notify_die(DIE_TRAP, str, regs, error_code, 8, SIGSEGV);

        tsk->thread.error_code = error_code;
        tsk->thread.trap_no = 8;

        /* This is always a kernel trap and never fixable (and thus must
           never return). */
        for (;;)
                die(str, regs, error_code);
}
asmlinkage void __kprobes do_general_protection(struct pt_regs *regs,
                                                long error_code)
{
        struct task_struct *tsk = current;

        conditional_sti(regs);

        tsk->thread.error_code = error_code;
        tsk->thread.trap_no = 13;

        if (user_mode(regs)) {
                if (exception_trace && unhandled_signal(tsk, SIGSEGV))
                        printk(KERN_INFO
                       "%s[%d] general protection rip:%lx rsp:%lx error:%lx\n",
                               tsk->comm, tsk->pid,
                               regs->rip, regs->rsp, error_code);

                force_sig(SIGSEGV, tsk);
                return;
        }

        /* kernel gp */
        {
                const struct exception_table_entry *fixup;
                fixup = search_exception_tables(regs->rip);
                if (fixup) {
                        regs->rip = fixup->fixup;
                        return;
                }
                if (notify_die(DIE_GPF, "general protection fault", regs,
                                error_code, 13, SIGSEGV) == NOTIFY_STOP)
                        return;
                die("general protection fault", regs, error_code);
        }
}

static __kprobes void
mem_parity_error(unsigned char reason, struct pt_regs *regs)
{
        printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n");
        printk("You probably have a hardware problem with your RAM chips\n");

        /* Clear and disable the memory parity error line. */
        reason = (reason & 0xf) | 4;
        outb(reason, 0x61);
}

static __kprobes void
io_check_error(unsigned char reason, struct pt_regs *regs)
{
        printk("NMI: IOCK error (debug interrupt?)\n");
        show_registers(regs);

        /* Re-enable the IOCK line, wait for a few seconds */
        reason = (reason & 0xf) | 8;
        outb(reason, 0x61);
        mdelay(2000);
        reason &= ~8;
        outb(reason, 0x61);
}

static __kprobes void
unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
{
        printk("Uhhuh. NMI received for unknown reason %02x.\n", reason);
        printk("Dazed and confused, but trying to continue\n");
        printk("Do you have a strange power saving mode enabled?\n");
}

/* Runs on IST stack. This code must keep interrupts off all the time.
   Nested NMIs are prevented by the CPU. */
asmlinkage __kprobes void default_do_nmi(struct pt_regs *regs)
{
        unsigned char reason = 0;
        int cpu;

        cpu = smp_processor_id();

        /* Only the BSP gets external NMIs from the system. */
        if (!cpu)
                reason = get_nmi_reason();

        if (!(reason & 0xc0)) {
                if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
                                                                == NOTIFY_STOP)
                        return;
#ifdef CONFIG_X86_LOCAL_APIC
                /*
                 * Ok, so this is none of the documented NMI sources,
                 * so it must be the NMI watchdog.
                 */
                if (nmi_watchdog > 0) {
                        nmi_watchdog_tick(regs, reason);
                        return;
                }
#endif
                unknown_nmi_error(reason, regs);
                return;
        }
        if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
                return;

        /* AK: following checks seem to be broken on modern chipsets. FIXME */
        if (reason & 0x80)
                mem_parity_error(reason, regs);
        if (reason & 0x40)
                io_check_error(reason, regs);
}
/* runs on IST stack. */
asmlinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
{
        if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) == NOTIFY_STOP) {
                return;
        }
        preempt_conditional_sti(regs);
        do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
        preempt_conditional_cli(regs);
}
/* Help handler running on IST stack to switch back to user stack
   for scheduling or signal handling. The actual stack switch is done in
   entry.S */
asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
{
        struct pt_regs *regs = eregs;
        /* Already synced */
        if (eregs == (struct pt_regs *)eregs->rsp)
                ;
        /* Exception from user space */
        else if (user_mode(eregs))
                regs = task_pt_regs(current);
        /* Exception from kernel and interrupts are enabled. Move to
           kernel process stack. */
        else if (eregs->eflags & X86_EFLAGS_IF)
                regs = (struct pt_regs *)(eregs->rsp -= sizeof(struct pt_regs));
        if (eregs != regs)
                *regs = *eregs;
        return regs;
}
/* runs on IST stack. */
asmlinkage void __kprobes do_debug(struct pt_regs *regs,
                                   unsigned long error_code)
{
        unsigned long condition;
        struct task_struct *tsk = current;
        siginfo_t info;

        get_debugreg(condition, 6);

        if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
                                                SIGTRAP) == NOTIFY_STOP)
                return;

        preempt_conditional_sti(regs);

        /* Mask out spurious debug traps due to lazy DR7 setting */
        if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
                if (!tsk->thread.debugreg7) {
                        goto clear_dr7;
                }
        }

        tsk->thread.debugreg6 = condition;

        /* Mask out spurious TF errors due to lazy TF clearing */
        if (condition & DR_STEP) {
                /*
                 * The TF error should be masked out only if the current
                 * process is not traced and if the TRAP flag has been set
                 * previously by a tracing process (condition detected by
                 * the PT_DTRACE flag); remember that the i386 TRAP flag
                 * can be modified by the process itself in user mode,
                 * allowing programs to debug themselves without the ptrace()
                 * interface.
                 */
                if (!user_mode(regs))
                        goto clear_TF_reenable;
                /*
                 * Was the TF flag set by a debugger? If so, clear it now,
                 * so that register information is correct.
                 */
                if (tsk->ptrace & PT_DTRACE) {
                        regs->eflags &= ~TF_MASK;
                        tsk->ptrace &= ~PT_DTRACE;
                }
        }

        /* Ok, finally something we can handle */
        tsk->thread.trap_no = 1;
        tsk->thread.error_code = error_code;
        info.si_signo = SIGTRAP;
        info.si_errno = 0;
        info.si_code = TRAP_BRKPT;
        info.si_addr = user_mode(regs) ? (void __user *)regs->rip : NULL;
        force_sig_info(SIGTRAP, &info, tsk);

clear_dr7:
        set_debugreg(0UL, 7);
        preempt_conditional_cli(regs);
        return;

clear_TF_reenable:
        set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
        regs->eflags &= ~TF_MASK;
        preempt_conditional_cli(regs);
}

static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr)
{
        const struct exception_table_entry *fixup;
        fixup = search_exception_tables(regs->rip);
        if (fixup) {
                regs->rip = fixup->fixup;
                return 1;
        }
        notify_die(DIE_GPF, str, regs, 0, trapnr, SIGFPE);
        /* Illegal floating point operation in the kernel */
        current->thread.trap_no = trapnr;
        die(str, regs, 0);
        return 0;
}
/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
asmlinkage void do_coprocessor_error(struct pt_regs *regs)
{
        void __user *rip = (void __user *)(regs->rip);
        struct task_struct *task;
        siginfo_t info;
        unsigned short cwd, swd;

        conditional_sti(regs);
        if (!user_mode(regs) &&
            kernel_math_error(regs, "kernel x87 math error", 16))
                return;

        /*
         * Save the info for the exception handler and clear the error.
         */
        task = current;
        save_init_fpu(task);
        task->thread.trap_no = 16;
        task->thread.error_code = 0;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_code = __SI_FAULT;
        info.si_addr = rip;
        /*
         * (~cwd & swd) will mask out exceptions that are not set to unmasked
         * status.  0x3f is the exception bits in these regs, 0x200 is the
         * C1 reg you need in case of a stack fault, 0x040 is the stack
         * fault bit.  We should only be taking one exception at a time,
         * so if this combination doesn't produce any single exception,
         * then we have a bad program that isn't synchronizing its FPU usage
         * and it will suffer the consequences since we won't be able to
         * fully reproduce the context of the exception
         */
        cwd = get_fpu_cwd(task);
        swd = get_fpu_swd(task);
        switch (swd & ~cwd & 0x3f) {
        case 0x000:
        default:
                break;
        case 0x001: /* Invalid Op */
                /*
                 * swd & 0x240 == 0x040: Stack Underflow
                 * swd & 0x240 == 0x240: Stack Overflow
                 * User must clear the SF bit (0x40) if set
                 */
                info.si_code = FPE_FLTINV;
                break;
        case 0x002: /* Denormalize */
        case 0x010: /* Underflow */
                info.si_code = FPE_FLTUND;
                break;
        case 0x004: /* Zero Divide */
                info.si_code = FPE_FLTDIV;
                break;
        case 0x008: /* Overflow */
                info.si_code = FPE_FLTOVF;
                break;
        case 0x020: /* Precision */
                info.si_code = FPE_FLTRES;
                break;
        }
        force_sig_info(SIGFPE, &info, task);
}
asmlinkage void bad_intr(void)
{
        printk("bad interrupt");
}

asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
{
        void __user *rip = (void __user *)(regs->rip);
        struct task_struct *task;
        siginfo_t info;
        unsigned short mxcsr;

        conditional_sti(regs);
        if (!user_mode(regs) &&
            kernel_math_error(regs, "kernel simd math error", 19))
                return;

        /*
         * Save the info for the exception handler and clear the error.
         */
        task = current;
        save_init_fpu(task);
        task->thread.trap_no = 19;
        task->thread.error_code = 0;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_code = __SI_FAULT;
        info.si_addr = rip;
        /*
         * The SIMD FPU exceptions are handled a little differently, as there
         * is only a single status/control register.  Thus, to determine which
         * unmasked exception was caught we must mask the exception mask bits
         * at 0x1f80, and then use these to mask the exception bits at 0x3f.
         */
        mxcsr = get_fpu_mxcsr(task);
        switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
        case 0x000:
        default:
                break;
        case 0x001: /* Invalid Op */
                info.si_code = FPE_FLTINV;
                break;
        case 0x002: /* Denormalize */
        case 0x010: /* Underflow */
                info.si_code = FPE_FLTUND;
                break;
        case 0x004: /* Zero Divide */
                info.si_code = FPE_FLTDIV;
                break;
        case 0x008: /* Overflow */
                info.si_code = FPE_FLTOVF;
                break;
        case 0x020: /* Precision */
                info.si_code = FPE_FLTRES;
                break;
        }
        force_sig_info(SIGFPE, &info, task);
}

asmlinkage void do_spurious_interrupt_bug(struct pt_regs *regs)
{
}
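/*
 * Weak default stubs: the real smp_thermal_interrupt() and
 * mce_threshold_interrupt() handlers are provided by the machine-check
 * code when the corresponding hardware support is configured; these empty
 * versions keep the kernel linkable otherwise.
 */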
asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
{
}

asmlinkage void __attribute__((weak)) mce_threshold_interrupt(void)
{
}

/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 */
asmlinkage void math_state_restore(void)
{
        struct task_struct *me = current;
        clts();                 /* Allow maths ops (or we recurse) */

        if (!used_math())
                init_fpu(me);
        restore_fpu_checking(&me->thread.i387.fxsave);
        task_thread_info(me)->status |= TS_USEDFPU;
}
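/*
 * Install the IDT entries for all of the exceptions handled above.
 * Faults that must survive a corrupted kernel stack (debug, NMI, double
 * fault, stack fault, machine check) get dedicated IST stacks; int3 and
 * into (vector 4) are system gates so user space may raise them directly.
 */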
void __init trap_init(void)
{
        set_intr_gate(0, &divide_error);
        set_intr_gate_ist(1, &debug, DEBUG_STACK);
        set_intr_gate_ist(2, &nmi, NMI_STACK);
        set_system_gate_ist(3, &int3, DEBUG_STACK); /* int3 can be called from all */
        set_system_gate(4, &overflow);  /* int4 can be called from all */
        set_intr_gate(5, &bounds);
        set_intr_gate(6, &invalid_op);
        set_intr_gate(7, &device_not_available);
        set_intr_gate_ist(8, &double_fault, DOUBLEFAULT_STACK);
        set_intr_gate(9, &coprocessor_segment_overrun);
        set_intr_gate(10, &invalid_TSS);
        set_intr_gate(11, &segment_not_present);
        set_intr_gate_ist(12, &stack_segment, STACKFAULT_STACK);
        set_intr_gate(13, &general_protection);
        set_intr_gate(14, &page_fault);
        set_intr_gate(15, &spurious_interrupt_bug);
        set_intr_gate(16, &coprocessor_error);
        set_intr_gate(17, &alignment_check);
#ifdef CONFIG_X86_MCE
        set_intr_gate_ist(18, &machine_check, MCE_STACK);
#endif
        set_intr_gate(19, &simd_coprocessor_error);

#ifdef CONFIG_IA32_EMULATION
        set_system_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
#endif

        /*
         * Should be a barrier for any external CPU state.
         */
        cpu_init();
}

/* Actual parsing is done early in setup.c. */
static int __init oops_dummy(char *s)
{
        panic_on_oops = 1;
        return 1;
}
__setup("oops=", oops_dummy);

static int __init kstack_setup(char *s)
{
        kstack_depth_to_print = simple_strtoul(s, NULL, 0);
        return 1;
}
__setup("kstack=", kstack_setup);
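/*
 * Usage note (illustrative): booting with "kstack=64" on the kernel
 * command line makes show_stack() dump 64 words of raw stack instead of
 * the default 10, and passing "oops=panic" makes any oops fatal via
 * panic_on_oops.
 */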