/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'entry.S'.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/nmi.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/unwind.h>
#include <linux/uaccess.h>
#include <linux/bug.h>
#include <linux/kdebug.h>
#include <linux/utsname.h>

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

#include <asm/system.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/debugreg.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/processor.h>
#include <asm/unwind.h>
#include <asm/smp.h>
#include <asm/pgalloc.h>
#include <asm/pda.h>
#include <asm/proto.h>
#include <asm/nmi.h>
#include <asm/stacktrace.h>

asmlinkage void divide_error(void);
asmlinkage void debug(void);
asmlinkage void nmi(void);
asmlinkage void int3(void);
asmlinkage void overflow(void);
asmlinkage void bounds(void);
asmlinkage void invalid_op(void);
asmlinkage void device_not_available(void);
asmlinkage void double_fault(void);
asmlinkage void coprocessor_segment_overrun(void);
asmlinkage void invalid_TSS(void);
asmlinkage void segment_not_present(void);
asmlinkage void stack_segment(void);
asmlinkage void general_protection(void);
asmlinkage void page_fault(void);
asmlinkage void coprocessor_error(void);
asmlinkage void simd_coprocessor_error(void);
asmlinkage void reserved(void);
asmlinkage void alignment_check(void);
asmlinkage void machine_check(void);
asmlinkage void spurious_interrupt_bug(void);

static unsigned int code_bytes = 64;
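
/*
 * The helpers below re-enable interrupts only if the interrupted context
 * had them enabled (EFLAGS.IF set), so a trap handler never turns
 * interrupts on behind code that was running with them disabled.  The
 * preempt_conditional_* variants also disable preemption around the
 * handler, since it may be running on a per-CPU exception (IST) stack.
 */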
static inline void conditional_sti(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void preempt_conditional_sti(struct pt_regs *regs)
{
	preempt_disable();
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void preempt_conditional_cli(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_disable();
	/* Make sure to not schedule here because we could be running
	   on an exception stack. */
	preempt_enable_no_resched();
}

int kstack_depth_to_print = 12;

#ifdef CONFIG_KALLSYMS
void printk_address(unsigned long address, int reliable)
{
	unsigned long offset = 0, symsize;
	const char *symname;
	char *modname;
	char *delim = ":";
	char namebuf[128];
	char reliab[4] = "";
	symname = kallsyms_lookup(address, &symsize, &offset,
					&modname, namebuf);
	if (!symname) {
		printk(" [<%016lx>]\n", address);
		return;
	}
	if (!reliable)
		strcpy(reliab, "? ");
	if (!modname)
		modname = delim = "";
	printk(" [<%016lx>] %s%s%s%s%s+0x%lx/0x%lx\n",
		address, reliab, delim, modname, delim, symname, offset, symsize);
}
#else
void printk_address(unsigned long address, int reliable)
{
	printk(" [<%016lx>]\n", address);
}
#endif
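
/*
 * Look up which, if any, of this CPU's IST exception stacks contains
 * 'stack'.  On a hit the stack's short name is returned through *idp and
 * the (exclusive) end address of that stack is returned so the caller can
 * walk it; *usedp remembers stacks already visited so a corrupted chain
 * cannot make us loop forever.  Returns NULL if 'stack' is not on any
 * exception stack.
 */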
static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
					unsigned *usedp, char **idp)
{
	static char ids[][8] = {
		[DEBUG_STACK - 1] = "#DB",
		[NMI_STACK - 1] = "NMI",
		[DOUBLEFAULT_STACK - 1] = "#DF",
		[STACKFAULT_STACK - 1] = "#SS",
		[MCE_STACK - 1] = "#MC",
#if DEBUG_STKSZ > EXCEPTION_STKSZ
		[N_EXCEPTION_STACKS ... N_EXCEPTION_STACKS + DEBUG_STKSZ / EXCEPTION_STKSZ - 2] = "#DB[?]"
#endif
	};
	unsigned k;

	/*
	 * Iterate over all exception stacks, and figure out whether
	 * 'stack' is in one of them:
	 */
	for (k = 0; k < N_EXCEPTION_STACKS; k++) {
		unsigned long end = per_cpu(orig_ist, cpu).ist[k];
		/*
		 * Is 'stack' above this exception frame's end?
		 * If yes then skip to the next frame.
		 */
		if (stack >= end)
			continue;
		/*
		 * Is 'stack' above this exception frame's start address?
		 * If yes then we found the right frame.
		 */
		if (stack >= end - EXCEPTION_STKSZ) {
			/*
			 * Make sure we only iterate through an exception
			 * stack once. If it comes up for the second time
			 * then there's something wrong going on - just
			 * break out and return NULL:
			 */
			if (*usedp & (1U << k))
				break;
			*usedp |= 1U << k;
			*idp = ids[k];
			return (unsigned long *)end;
		}
		/*
		 * If this is a debug stack, and if it has a larger size than
		 * the usual exception stacks, then 'stack' might still
		 * be within the lower portion of the debug stack:
		 */
#if DEBUG_STKSZ > EXCEPTION_STKSZ
		if (k == DEBUG_STACK - 1 && stack >= end - DEBUG_STKSZ) {
			unsigned j = N_EXCEPTION_STACKS - 1;

			/*
			 * Black magic. A large debug stack is composed of
			 * multiple exception stack entries, which we
			 * iterate through now. Don't look:
			 */
			do {
				++j;
				end -= EXCEPTION_STKSZ;
				ids[j][4] = '1' + (j - N_EXCEPTION_STACKS);
			} while (stack < end - EXCEPTION_STKSZ);
			if (*usedp & (1U << j))
				break;
			*usedp |= 1U << j;
			*idp = ids[j];
			return (unsigned long *)end;
		}
#endif
	}
	return NULL;
}

#define MSG(txt) ops->warning(data, txt)

/*
 * x86-64 can have up to three kernel stacks:
 * process stack
 * interrupt stack
 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
 */

static inline int valid_stack_ptr(struct thread_info *tinfo,
			void *p, unsigned int size, void *end)
{
	void *t = (void *)tinfo;
	if (end) {
		if (p < end && p >= (end-THREAD_SIZE))
			return 1;
		else
			return 0;
	}
	return p > t && p < t + THREAD_SIZE - size;
}

/* The form of the top of the frame on the stack */
struct stack_frame {
	struct stack_frame *next_frame;
	unsigned long return_address;
};
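
/*
 * Scan one stack region and report every value that looks like a kernel
 * text address through ops->address().  With frame pointers, a hit found
 * at bp + 8 is a genuine return address saved by CALL and is reported as
 * reliable; while a frame-pointer chain is being followed (bp != 0), all
 * other hits from the blind scan are reported as unreliable.
 */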
static inline unsigned long print_context_stack(struct thread_info *tinfo,
				unsigned long *stack, unsigned long bp,
				const struct stacktrace_ops *ops, void *data,
				unsigned long *end)
{
	struct stack_frame *frame = (struct stack_frame *)bp;

	while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
		unsigned long addr;

		addr = *stack;
		if (__kernel_text_address(addr)) {
			if ((unsigned long) stack == bp + 8) {
				ops->address(data, addr, 1);
				frame = frame->next_frame;
				bp = (unsigned long) frame;
			} else {
				ops->address(data, addr, bp == 0);
			}
		}
		stack++;
	}
	return bp;
}
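
/*
 * dump_trace() walks every stack the interrupted context may have touched:
 * it starts at 'stack', prints any IST exception stacks it finds itself on
 * (hopping to the next stack via the link words stored near the top of
 * each one), then the per-CPU IRQ stack, and finally the normal process
 * stack.
 */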
void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
		unsigned long *stack, unsigned long bp,
		const struct stacktrace_ops *ops, void *data)
{
	const unsigned cpu = get_cpu();
	unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr;
	unsigned used = 0;
	struct thread_info *tinfo;

	if (!tsk)
		tsk = current;
	tinfo = task_thread_info(tsk);

	if (!stack) {
		unsigned long dummy;
		stack = &dummy;
		if (tsk && tsk != current)
			stack = (unsigned long *)tsk->thread.sp;
	}

#ifdef CONFIG_FRAME_POINTER
	if (!bp) {
		if (tsk == current) {
			/* Grab bp right from our regs */
			asm("movq %%rbp, %0" : "=r" (bp) :);
		} else {
			/* bp is the last reg pushed by switch_to */
			bp = *(unsigned long *) tsk->thread.sp;
		}
	}
#endif

	/*
	 * Print function call entries in all stacks, starting at the
	 * current stack address. If the stacks consist of nested
	 * exceptions
	 */
	for (;;) {
		char *id;
		unsigned long *estack_end;

		estack_end = in_exception_stack(cpu, (unsigned long)stack,
						&used, &id);
		if (estack_end) {
			if (ops->stack(data, id) < 0)
				break;
			bp = print_context_stack(tinfo, stack, bp, ops,
							data, estack_end);
			ops->stack(data, "<EOE>");
			/*
			 * We link to the next stack via the
			 * second-to-last pointer (index -2 to end) in the
			 * exception stack:
			 */
			stack = (unsigned long *) estack_end[-2];
			continue;
		}
		if (irqstack_end) {
			unsigned long *irqstack;
			irqstack = irqstack_end -
				(IRQSTACKSIZE - 64) / sizeof(*irqstack);

			if (stack >= irqstack && stack < irqstack_end) {
				if (ops->stack(data, "IRQ") < 0)
					break;
				bp = print_context_stack(tinfo, stack, bp,
						ops, data, irqstack_end);
				/*
				 * We link to the next stack (which would be
				 * the process stack normally) via the last
				 * pointer (index -1 to end) in the IRQ stack:
				 */
				stack = (unsigned long *) (irqstack_end[-1]);
				irqstack_end = NULL;
				ops->stack(data, "EOI");
				continue;
			}
		}
		break;
	}

	/*
	 * This handles the process stack:
	 */
	bp = print_context_stack(tinfo, stack, bp, ops, data, NULL);
	put_cpu();
}
EXPORT_SYMBOL(dump_trace);

static void
print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
	print_symbol(msg, symbol);
	printk("\n");
}

static void print_trace_warning(void *data, char *msg)
{
	printk("%s\n", msg);
}

static int print_trace_stack(void *data, char *name)
{
	printk(" <%s> ", name);
	return 0;
}

static void print_trace_address(void *data, unsigned long addr, int reliable)
{
	touch_nmi_watchdog();
	printk_address(addr, reliable);
}

static const struct stacktrace_ops print_trace_ops = {
	.warning = print_trace_warning,
	.warning_symbol = print_trace_warning_symbol,
	.stack = print_trace_stack,
	.address = print_trace_address,
};

void
show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long *stack,
		unsigned long bp)
{
	printk("\nCall Trace:\n");
	dump_trace(tsk, regs, stack, bp, &print_trace_ops, NULL);
	printk("\n");
}

static void
_show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long *sp,
		unsigned long bp)
{
	unsigned long *stack;
	int i;
	const int cpu = smp_processor_id();
	unsigned long *irqstack_end = (unsigned long *) (cpu_pda(cpu)->irqstackptr);
	unsigned long *irqstack = (unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE);

	// debugging aid: "show_stack(NULL, NULL);" prints the
	// back trace for this cpu.

	if (sp == NULL) {
		if (tsk)
			sp = (unsigned long *)tsk->thread.sp;
		else
			sp = (unsigned long *)&sp;
	}

	stack = sp;
	for (i = 0; i < kstack_depth_to_print; i++) {
		if (stack >= irqstack && stack <= irqstack_end) {
			if (stack == irqstack_end) {
				stack = (unsigned long *) (irqstack_end[-1]);
				printk(" <EOI> ");
			}
		} else {
			if (((long) stack & (THREAD_SIZE-1)) == 0)
				break;
		}
		if (i && ((i % 4) == 0))
			printk("\n");
		printk(" %016lx", *stack++);
		touch_nmi_watchdog();
	}
	show_trace(tsk, regs, sp, bp);
}

void show_stack(struct task_struct *tsk, unsigned long *sp)
{
	_show_stack(tsk, NULL, sp, 0);
}

/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
	unsigned long dummy;
	unsigned long bp = 0;

#ifdef CONFIG_FRAME_POINTER
	if (!bp)
		asm("movq %%rbp, %0" : "=r" (bp) :);
#endif

	printk("Pid: %d, comm: %.20s %s %s %.*s\n",
		current->pid, current->comm, print_tainted(),
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);
	show_trace(NULL, NULL, &dummy, bp);
}
EXPORT_SYMBOL(dump_stack);
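
/*
 * Dump the register state, the kernel stack and a window of code bytes
 * around the faulting RIP.  The window is 'code_bytes' bytes long (64 by
 * default, configurable with the code_bytes= boot parameter at the bottom
 * of this file), with roughly two thirds (code_bytes * 43 / 64) shown
 * before RIP and the byte at RIP itself printed in <angle brackets>.
 */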
void show_registers(struct pt_regs *regs)
{
	int i;
	unsigned long sp;
	const int cpu = smp_processor_id();
	struct task_struct *cur = cpu_pda(cpu)->pcurrent;
	u8 *ip;
	unsigned int code_prologue = code_bytes * 43 / 64;
	unsigned int code_len = code_bytes;

	sp = regs->sp;
	ip = (u8 *) regs->ip - code_prologue;
	printk("CPU %d ", cpu);
	__show_regs(regs);
	printk("Process %s (pid: %d, threadinfo %p, task %p)\n",
		cur->comm, cur->pid, task_thread_info(cur), cur);

	/*
	 * When in-kernel, we also print out the stack and code at the
	 * time of the fault..
	 */
	if (!user_mode(regs)) {
		unsigned char c;
		printk("Stack: ");
		_show_stack(NULL, regs, (unsigned long *)sp, regs->bp);
		printk("\n");

		printk(KERN_EMERG "Code: ");
		if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
			/* try starting at RIP */
			ip = (u8 *) regs->ip;
			code_len = code_len - code_prologue + 1;
		}
		for (i = 0; i < code_len; i++, ip++) {
			if (ip < (u8 *)PAGE_OFFSET ||
					probe_kernel_address(ip, c)) {
				printk(" Bad RIP value.");
				break;
			}
			if (ip == (u8 *)regs->ip)
				printk("<%02x> ", c);
			else
				printk("%02x ", c);
		}
	}
	printk("\n");
}
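
/*
 * Used by the generic BUG() machinery (report_bug()) to check that the
 * trapping instruction really is the two-byte ud2 opcode 0x0f 0x0b, which
 * reads as the little-endian 16-bit value 0x0b0f.
 */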
int is_valid_bugaddr(unsigned long ip)
{
	unsigned short ud2;

	if (__copy_from_user(&ud2, (const void __user *) ip, sizeof(ud2)))
		return 0;

	return ud2 == 0x0b0f;
}
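
/*
 * Oops/die output is serialized through die_lock so traces from different
 * CPUs do not interleave.  A CPU that oopses while already holding the
 * lock (a nested oops) is allowed straight through; die_nest_count tracks
 * how deep we are so the lock is only dropped when the outermost
 * oops_end() runs.
 */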
static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;

unsigned __kprobes long oops_begin(void)
{
	int cpu;
	unsigned long flags;

	oops_enter();

	/* racy, but better than risking deadlock. */
	raw_local_irq_save(flags);
	cpu = smp_processor_id();
	if (!__raw_spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			__raw_spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);
	return flags;
}

void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
	die_owner = -1;
	bust_spinlocks(0);
	die_nest_count--;
	if (!die_nest_count)
		/* Nest count reaches zero, release the lock. */
		__raw_spin_unlock(&die_lock);
	raw_local_irq_restore(flags);
	if (!regs) {
		oops_exit();
		return;
	}
	if (panic_on_oops)
		panic("Fatal exception");
	oops_exit();
	do_exit(signr);
}

int __kprobes __die(const char *str, struct pt_regs *regs, long err)
{
	static int die_counter;

	printk(KERN_EMERG "%s: %04lx [%u] ", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("DEBUG_PAGEALLOC");
#endif
	printk("\n");
	if (notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV) == NOTIFY_STOP)
		return 1;

	show_registers(regs);
	add_taint(TAINT_DIE);
	/* Executive summary in case the oops scrolled away */
	printk(KERN_ALERT "RIP ");
	printk_address(regs->ip, 1);
	printk(" RSP <%016lx>\n", regs->sp);
	if (kexec_should_crash(current))
		crash_kexec(regs);
	return 0;
}

void die(const char *str, struct pt_regs *regs, long err)
{
	unsigned long flags = oops_begin();

	if (!user_mode(regs))
		report_bug(regs->ip, regs);

	if (__die(str, regs, err))
		regs = NULL;
	oops_end(flags, regs, SIGSEGV);
}

void __kprobes die_nmi(char *str, struct pt_regs *regs, int do_panic)
{
	unsigned long flags = oops_begin();

	/*
	 * We are in trouble anyway, let's at least try
	 * to get a message out.
	 */
	printk(str, smp_processor_id());
	show_registers(regs);
	if (kexec_should_crash(current))
		crash_kexec(regs);
	if (do_panic || panic_on_oops)
		panic("Non maskable interrupt");
	oops_end(flags, NULL, SIGBUS);
	nmi_exit();
	local_irq_enable();
	do_exit(SIGBUS);
}

static void __kprobes do_trap(int trapnr, int signr, char *str,
			struct pt_regs *regs, long error_code,
			siginfo_t *info)
{
	struct task_struct *tsk = current;

	if (user_mode(regs)) {
		/*
		 * We want error_code and trap_no set for userspace
		 * faults and kernelspace faults which result in
		 * die(), but not kernelspace faults which are fixed
		 * up. die() gives the process no chance to handle
		 * the signal and notice the kernel fault information,
		 * so that won't result in polluting the information
		 * about previously queued, but not yet delivered,
		 * faults. See also do_general_protection below.
		 */
		tsk->thread.error_code = error_code;
		tsk->thread.trap_no = trapnr;

		if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
		    printk_ratelimit()) {
			printk(KERN_INFO
			       "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
			       tsk->comm, tsk->pid, str,
			       regs->ip, regs->sp, error_code);
			print_vma_addr(" in ", regs->ip);
			printk("\n");
		}

		if (info)
			force_sig_info(signr, info, tsk);
		else
			force_sig(signr, tsk);
		return;
	}

	if (!fixup_exception(regs)) {
		tsk->thread.error_code = error_code;
		tsk->thread.trap_no = trapnr;
		die(str, regs, error_code);
	}
	return;
}

#define DO_ERROR(trapnr, signr, str, name) \
asmlinkage void do_##name(struct pt_regs *regs, long error_code) \
{ \
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
	    == NOTIFY_STOP) \
		return; \
	conditional_sti(regs); \
	do_trap(trapnr, signr, str, regs, error_code, NULL); \
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
asmlinkage void do_##name(struct pt_regs *regs, long error_code) \
{ \
	siginfo_t info; \
	info.si_signo = signr; \
	info.si_errno = 0; \
	info.si_code = sicode; \
	info.si_addr = (void __user *)siaddr; \
	trace_hardirqs_fixup(); \
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
	    == NOTIFY_STOP) \
		return; \
	conditional_sti(regs); \
	do_trap(trapnr, signr, str, regs, error_code, &info); \
}
DO_ERROR_INFO( 0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip)
DO_ERROR( 4, SIGSEGV, "overflow", overflow)
DO_ERROR( 5, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO( 6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip)
DO_ERROR( 7, SIGSEGV, "device not available", device_not_available)
DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
DO_ERROR(18, SIGSEGV, "reserved", reserved)

/* Runs on IST stack */
asmlinkage void do_stack_segment(struct pt_regs *regs, long error_code)
{
	if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
			12, SIGBUS) == NOTIFY_STOP)
		return;
	preempt_conditional_sti(regs);
	do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL);
	preempt_conditional_cli(regs);
}

asmlinkage void do_double_fault(struct pt_regs *regs, long error_code)
{
	static const char str[] = "double fault";
	struct task_struct *tsk = current;

	/* Return not checked because a double fault cannot be ignored */
	notify_die(DIE_TRAP, str, regs, error_code, 8, SIGSEGV);

	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 8;

	/* This is always a kernel trap and never fixable (and thus must
	   never return). */
	for (;;)
		die(str, regs, error_code);
}

asmlinkage void __kprobes do_general_protection(struct pt_regs *regs,
						long error_code)
{
	struct task_struct *tsk = current;

	conditional_sti(regs);

	if (user_mode(regs)) {
		tsk->thread.error_code = error_code;
		tsk->thread.trap_no = 13;

		if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
		    printk_ratelimit()) {
			printk(KERN_INFO
			       "%s[%d] general protection ip:%lx sp:%lx error:%lx",
			       tsk->comm, tsk->pid,
			       regs->ip, regs->sp, error_code);
			print_vma_addr(" in ", regs->ip);
			printk("\n");
		}

		force_sig(SIGSEGV, tsk);
		return;
	}

	if (fixup_exception(regs))
		return;

	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 13;
	if (notify_die(DIE_GPF, "general protection fault", regs,
			error_code, 13, SIGSEGV) == NOTIFY_STOP)
		return;
	die("general protection fault", regs, error_code);
}
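
/*
 * The NMI handlers below decode the NMI status byte read from I/O port
 * 0x61: bit 7 indicates a memory parity/SERR# error, bit 6 an IOCHK#
 * error.  Writing the port back with bit 2 or bit 3 set clears and
 * (temporarily) disables the corresponding error line, which is what the
 * handlers do before returning.
 */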
static __kprobes void
mem_parity_error(unsigned char reason, struct pt_regs *regs)
{
	printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n",
		reason);
	printk(KERN_EMERG "You have some hardware problem, likely on the PCI bus.\n");

#if defined(CONFIG_EDAC)
	if (edac_handler_set()) {
		edac_atomic_assert_error();
		return;
	}
#endif

	if (panic_on_unrecovered_nmi)
		panic("NMI: Not continuing");

	printk(KERN_EMERG "Dazed and confused, but trying to continue\n");

	/* Clear and disable the memory parity error line. */
	reason = (reason & 0xf) | 4;
	outb(reason, 0x61);
}

static __kprobes void
io_check_error(unsigned char reason, struct pt_regs *regs)
{
	printk("NMI: IOCK error (debug interrupt?)\n");
	show_registers(regs);

	/* Re-enable the IOCK line, wait for a few seconds */
	reason = (reason & 0xf) | 8;
	outb(reason, 0x61);
	mdelay(2000);
	reason &= ~8;
	outb(reason, 0x61);
}

static __kprobes void
unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
{
	printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n",
		reason);
	printk(KERN_EMERG "Do you have a strange power saving mode enabled?\n");
	if (panic_on_unrecovered_nmi)
		panic("NMI: Not continuing");
	printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
}

/* Runs on IST stack. This code must keep interrupts off all the time.
   Nested NMIs are prevented by the CPU. */
asmlinkage __kprobes void default_do_nmi(struct pt_regs *regs)
{
	unsigned char reason = 0;
	int cpu;

	cpu = smp_processor_id();

	/* Only the BSP gets external NMIs from the system. */
	if (!cpu)
		reason = get_nmi_reason();

	if (!(reason & 0xc0)) {
		if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
		    == NOTIFY_STOP)
			return;
		/*
		 * Ok, so this is none of the documented NMI sources,
		 * so it must be the NMI watchdog.
		 */
		if (nmi_watchdog_tick(regs, reason))
			return;
		if (!do_nmi_callback(regs, cpu))
			unknown_nmi_error(reason, regs);

		return;
	}
	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
		return;

	/* AK: following checks seem to be broken on modern chipsets. FIXME */
	if (reason & 0x80)
		mem_parity_error(reason, regs);
	if (reason & 0x40)
		io_check_error(reason, regs);
}

/* runs on IST stack. */
asmlinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
{
	trace_hardirqs_fixup();

	if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) == NOTIFY_STOP) {
		return;
	}

	preempt_conditional_sti(regs);
	do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
	preempt_conditional_cli(regs);
}

/* Help handler running on IST stack to switch back to user stack
   for scheduling or signal handling. The actual stack switch is done in
   entry.S */
asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
{
	struct pt_regs *regs = eregs;
	/* Did already sync */
	if (eregs == (struct pt_regs *)eregs->sp)
		;
	/* Exception from user space */
	else if (user_mode(eregs))
		regs = task_pt_regs(current);
	/* Exception from kernel and interrupts are enabled. Move to
	   kernel process stack. */
	else if (eregs->flags & X86_EFLAGS_IF)
		regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs));
	if (eregs != regs)
		*regs = *eregs;
	return regs;
}

/* runs on IST stack. */
asmlinkage void __kprobes do_debug(struct pt_regs *regs,
				   unsigned long error_code)
{
	unsigned long condition;
	struct task_struct *tsk = current;
	siginfo_t info;

	trace_hardirqs_fixup();

	get_debugreg(condition, 6);

	/*
	 * The processor cleared BTF, so don't mark that we need it set.
	 */
	clear_tsk_thread_flag(tsk, TIF_DEBUGCTLMSR);
	tsk->thread.debugctlmsr = 0;

	if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
						SIGTRAP) == NOTIFY_STOP)
		return;

	preempt_conditional_sti(regs);

	/* Mask out spurious debug traps due to lazy DR7 setting */
	if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
		if (!tsk->thread.debugreg7) {
			goto clear_dr7;
		}
	}

	tsk->thread.debugreg6 = condition;

	/*
	 * Single-stepping through TF: make sure we ignore any events in
	 * kernel space (but re-enable TF when returning to user mode).
	 */
	if (condition & DR_STEP) {
		if (!user_mode(regs))
			goto clear_TF_reenable;
	}

	/* Ok, finally something we can handle */
	tsk->thread.trap_no = 1;
	tsk->thread.error_code = error_code;
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_BRKPT;
	info.si_addr = user_mode(regs) ? (void __user *)regs->ip : NULL;
	force_sig_info(SIGTRAP, &info, tsk);

clear_dr7:
	set_debugreg(0UL, 7);
	preempt_conditional_cli(regs);
	return;

clear_TF_reenable:
	set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
	regs->flags &= ~X86_EFLAGS_TF;
	preempt_conditional_cli(regs);
}

static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr)
{
	if (fixup_exception(regs))
		return 1;

	notify_die(DIE_GPF, str, regs, 0, trapnr, SIGFPE);
	/* Illegal floating point operation in the kernel */
	current->thread.trap_no = trapnr;
	die(str, regs, 0);
	return 0;
}

/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
asmlinkage void do_coprocessor_error(struct pt_regs *regs)
{
	void __user *ip = (void __user *)(regs->ip);
	struct task_struct *task;
	siginfo_t info;
	unsigned short cwd, swd;

	conditional_sti(regs);
	if (!user_mode(regs) &&
	    kernel_math_error(regs, "kernel x87 math error", 16))
		return;

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	task = current;
	save_init_fpu(task);
	task->thread.trap_no = 16;
	task->thread.error_code = 0;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = __SI_FAULT;
	info.si_addr = ip;
	/*
	 * (~cwd & swd) will mask out exceptions that are not set to unmasked
	 * status. 0x3f is the exception bits in these regs, 0x200 is the
	 * C1 reg you need in case of a stack fault, 0x040 is the stack
	 * fault bit. We should only be taking one exception at a time,
	 * so if this combination doesn't produce any single exception,
	 * then we have a bad program that isn't synchronizing its FPU usage
	 * and it will suffer the consequences since we won't be able to
	 * fully reproduce the context of the exception
	 */
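	/*
	 * For reference, the x87 status-word exception flags tested below
	 * are: 0x001 invalid operation, 0x002 denormal operand, 0x004
	 * divide-by-zero, 0x008 overflow, 0x010 underflow, 0x020 precision,
	 * and 0x040 is the stack-fault bit mentioned above.
	 */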
	cwd = get_fpu_cwd(task);
	swd = get_fpu_swd(task);
	switch (swd & ~cwd & 0x3f) {
	case 0x000:
	default:
		break;
	case 0x001: /* Invalid Op */
		/*
		 * swd & 0x240 == 0x040: Stack Underflow
		 * swd & 0x240 == 0x240: Stack Overflow
		 * User must clear the SF bit (0x40) if set
		 */
		info.si_code = FPE_FLTINV;
		break;
	case 0x002: /* Denormalize */
	case 0x010: /* Underflow */
		info.si_code = FPE_FLTUND;
		break;
	case 0x004: /* Zero Divide */
		info.si_code = FPE_FLTDIV;
		break;
	case 0x008: /* Overflow */
		info.si_code = FPE_FLTOVF;
		break;
	case 0x020: /* Precision */
		info.si_code = FPE_FLTRES;
		break;
	}
	force_sig_info(SIGFPE, &info, task);
}

asmlinkage void bad_intr(void)
{
	printk("bad interrupt");
}

asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
{
	void __user *ip = (void __user *)(regs->ip);
	struct task_struct *task;
	siginfo_t info;
	unsigned short mxcsr;

	conditional_sti(regs);
	if (!user_mode(regs) &&
	    kernel_math_error(regs, "kernel simd math error", 19))
		return;

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	task = current;
	save_init_fpu(task);
	task->thread.trap_no = 19;
	task->thread.error_code = 0;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = __SI_FAULT;
	info.si_addr = ip;
	/*
	 * The SIMD FPU exceptions are handled a little differently, as there
	 * is only a single status/control register. Thus, to determine which
	 * unmasked exception was caught we must mask the exception mask bits
	 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
	 */
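	/*
	 * In MXCSR the exception flags live in bits 0-5 (0x3f) and the
	 * corresponding mask bits in bits 7-12 (0x1f80), so shifting the
	 * mask field right by 7 lines it up with the flag field; the
	 * expression below therefore keeps only the flags whose exception
	 * is unmasked.
	 */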
	mxcsr = get_fpu_mxcsr(task);
	switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
	case 0x000:
	default:
		break;
	case 0x001: /* Invalid Op */
		info.si_code = FPE_FLTINV;
		break;
	case 0x002: /* Denormalize */
	case 0x010: /* Underflow */
		info.si_code = FPE_FLTUND;
		break;
	case 0x004: /* Zero Divide */
		info.si_code = FPE_FLTDIV;
		break;
	case 0x008: /* Overflow */
		info.si_code = FPE_FLTOVF;
		break;
	case 0x020: /* Precision */
		info.si_code = FPE_FLTRES;
		break;
	}
	force_sig_info(SIGFPE, &info, task);
}

asmlinkage void do_spurious_interrupt_bug(struct pt_regs *regs)
{
}

asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
{
}

asmlinkage void __attribute__((weak)) mce_threshold_interrupt(void)
{
}

/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 */
asmlinkage void math_state_restore(void)
{
	struct task_struct *me = current;

	clts();			/* Allow maths ops (or we recurse) */
	if (!used_math())
		init_fpu(me);
	restore_fpu_checking(&me->thread.i387.fxsave);
	task_thread_info(me)->status |= TS_USEDFPU;
	me->fpu_counter++;
}
EXPORT_SYMBOL_GPL(math_state_restore);
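
/*
 * Install the IDT entries for the architectural exceptions.  Handlers that
 * must not run on a possibly bad kernel stack (debug, NMI, double fault,
 * stack fault and machine check) get their own IST stack; int3 and
 * overflow are system gates, so userspace may raise them directly.
 */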
void __init trap_init(void)
{
	set_intr_gate(0, &divide_error);
	set_intr_gate_ist(1, &debug, DEBUG_STACK);
	set_intr_gate_ist(2, &nmi, NMI_STACK);
	set_system_gate_ist(3, &int3, DEBUG_STACK); /* int3 can be called from all */
	set_system_gate(4, &overflow); /* int4 can be called from all */
	set_intr_gate(5, &bounds);
	set_intr_gate(6, &invalid_op);
	set_intr_gate(7, &device_not_available);
	set_intr_gate_ist(8, &double_fault, DOUBLEFAULT_STACK);
	set_intr_gate(9, &coprocessor_segment_overrun);
	set_intr_gate(10, &invalid_TSS);
	set_intr_gate(11, &segment_not_present);
	set_intr_gate_ist(12, &stack_segment, STACKFAULT_STACK);
	set_intr_gate(13, &general_protection);
	set_intr_gate(14, &page_fault);
	set_intr_gate(15, &spurious_interrupt_bug);
	set_intr_gate(16, &coprocessor_error);
	set_intr_gate(17, &alignment_check);
#ifdef CONFIG_X86_MCE
	set_intr_gate_ist(18, &machine_check, MCE_STACK);
#endif
	set_intr_gate(19, &simd_coprocessor_error);
#ifdef CONFIG_IA32_EMULATION
	set_system_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
#endif
	/*
	 * Should be a barrier for any external CPU state.
	 */
	cpu_init();
}
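
/*
 * Boot-time tunables handled in this file:
 *   oops=panic    - panic on any oops instead of just killing the task
 *   kstack=N      - number of raw stack words show_stack() prints
 *   code_bytes=N  - size of the code dump around RIP (clamped to 8192)
 */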
static int __init oops_setup(char *s)
{
	if (!s)
		return -EINVAL;
	if (!strcmp(s, "panic"))
		panic_on_oops = 1;
	return 0;
}
early_param("oops", oops_setup);

static int __init kstack_setup(char *s)
{
	if (!s)
		return -EINVAL;
	kstack_depth_to_print = simple_strtoul(s, NULL, 0);
	return 0;
}
early_param("kstack", kstack_setup);

static int __init code_bytes_setup(char *s)
{
	code_bytes = simple_strtoul(s, NULL, 0);
	if (code_bytes > 8192)
		code_bytes = 8192;
	return 1;
}
__setup("code_bytes=", code_bytes_setup);