traps_64.c

/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'entry.S'.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/nmi.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/unwind.h>
#include <linux/uaccess.h>
#include <linux/bug.h>
#include <linux/kdebug.h>
#include <linux/utsname.h>
#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

#include <asm/system.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/debugreg.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/processor.h>
#include <asm/unwind.h>
#include <asm/smp.h>
#include <asm/pgalloc.h>
#include <asm/pda.h>
#include <asm/proto.h>
#include <asm/nmi.h>
#include <asm/stacktrace.h>

asmlinkage void divide_error(void);
asmlinkage void debug(void);
asmlinkage void nmi(void);
asmlinkage void int3(void);
asmlinkage void overflow(void);
asmlinkage void bounds(void);
asmlinkage void invalid_op(void);
asmlinkage void device_not_available(void);
asmlinkage void double_fault(void);
asmlinkage void coprocessor_segment_overrun(void);
asmlinkage void invalid_TSS(void);
asmlinkage void segment_not_present(void);
asmlinkage void stack_segment(void);
asmlinkage void general_protection(void);
asmlinkage void page_fault(void);
asmlinkage void coprocessor_error(void);
asmlinkage void simd_coprocessor_error(void);
asmlinkage void reserved(void);
asmlinkage void alignment_check(void);
asmlinkage void machine_check(void);
asmlinkage void spurious_interrupt_bug(void);

static unsigned int code_bytes = 64;
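
/*
 * Helpers for the trap handlers below: re-enable interrupts only if the
 * interrupted context had them enabled.  The preempt_* variants also
 * bump the preempt count so we never schedule while running on an IST
 * (exception) stack.
 */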
static inline void conditional_sti(struct pt_regs *regs)
{
        if (regs->flags & X86_EFLAGS_IF)
                local_irq_enable();
}

static inline void preempt_conditional_sti(struct pt_regs *regs)
{
        inc_preempt_count();
        if (regs->flags & X86_EFLAGS_IF)
                local_irq_enable();
}

static inline void preempt_conditional_cli(struct pt_regs *regs)
{
        if (regs->flags & X86_EFLAGS_IF)
                local_irq_disable();
        /* Make sure to not schedule here because we could be running
           on an exception stack. */
        dec_preempt_count();
}

int kstack_depth_to_print = 12;

void printk_address(unsigned long address, int reliable)
{
#ifdef CONFIG_KALLSYMS
        unsigned long offset = 0, symsize;
        const char *symname;
        char *modname;
        char *delim = ":";
        char namebuf[KSYM_NAME_LEN];
        char reliab[4] = "";

        symname = kallsyms_lookup(address, &symsize, &offset,
                                  &modname, namebuf);
        if (!symname) {
                printk(" [<%016lx>]\n", address);
                return;
        }
        if (!reliable)
                strcpy(reliab, "? ");
        if (!modname)
                modname = delim = "";
        printk(" [<%016lx>] %s%s%s%s%s+0x%lx/0x%lx\n",
                address, reliab, delim, modname, delim, symname, offset, symsize);
#else
        printk(" [<%016lx>]\n", address);
#endif
}
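
/*
 * in_exception_stack() checks whether 'stack' lies inside one of this
 * CPU's IST exception stacks.  On a hit it marks the stack as visited
 * in *usedp (so a corrupt trace cannot loop forever), stores a short
 * name such as "NMI" or "#DF" in *idp, and returns the stack's end
 * address; otherwise it returns NULL.
 */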
static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
                                         unsigned *usedp, char **idp)
{
        static char ids[][8] = {
                [DEBUG_STACK - 1] = "#DB",
                [NMI_STACK - 1] = "NMI",
                [DOUBLEFAULT_STACK - 1] = "#DF",
                [STACKFAULT_STACK - 1] = "#SS",
                [MCE_STACK - 1] = "#MC",
#if DEBUG_STKSZ > EXCEPTION_STKSZ
                [N_EXCEPTION_STACKS ... N_EXCEPTION_STACKS + DEBUG_STKSZ / EXCEPTION_STKSZ - 2] = "#DB[?]"
#endif
        };
        unsigned k;

        /*
         * Iterate over all exception stacks, and figure out whether
         * 'stack' is in one of them:
         */
        for (k = 0; k < N_EXCEPTION_STACKS; k++) {
                unsigned long end = per_cpu(orig_ist, cpu).ist[k];
                /*
                 * Is 'stack' above this exception frame's end?
                 * If yes then skip to the next frame.
                 */
                if (stack >= end)
                        continue;
                /*
                 * Is 'stack' above this exception frame's start address?
                 * If yes then we found the right frame.
                 */
                if (stack >= end - EXCEPTION_STKSZ) {
                        /*
                         * Make sure we only iterate through an exception
                         * stack once. If it comes up for the second time
                         * then there's something wrong going on - just
                         * break out and return NULL:
                         */
                        if (*usedp & (1U << k))
                                break;
                        *usedp |= 1U << k;
                        *idp = ids[k];
                        return (unsigned long *)end;
                }
                /*
                 * If this is a debug stack, and if it has a larger size than
                 * the usual exception stacks, then 'stack' might still
                 * be within the lower portion of the debug stack:
                 */
#if DEBUG_STKSZ > EXCEPTION_STKSZ
                if (k == DEBUG_STACK - 1 && stack >= end - DEBUG_STKSZ) {
                        unsigned j = N_EXCEPTION_STACKS - 1;

                        /*
                         * Black magic. A large debug stack is composed of
                         * multiple exception stack entries, which we
                         * iterate through now. Don't look:
                         */
                        do {
                                ++j;
                                end -= EXCEPTION_STKSZ;
                                ids[j][4] = '1' + (j - N_EXCEPTION_STACKS);
                        } while (stack < end - EXCEPTION_STKSZ);
                        if (*usedp & (1U << j))
                                break;
                        *usedp |= 1U << j;
                        *idp = ids[j];
                        return (unsigned long *)end;
                }
#endif
        }
        return NULL;
}

#define MSG(txt) ops->warning(data, txt)

/*
 * x86-64 can have up to three kernel stacks:
 * process stack
 * interrupt stack
 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
 */

static inline int valid_stack_ptr(struct thread_info *tinfo,
                                  void *p, unsigned int size, void *end)
{
        void *t = tinfo;
        if (end) {
                if (p < end && p >= (end - THREAD_SIZE))
                        return 1;
                else
                        return 0;
        }
        return p > t && p < t + THREAD_SIZE - size;
}

/* The form of the top of the frame on the stack */
struct stack_frame {
        struct stack_frame *next_frame;
        unsigned long return_address;
};

static inline unsigned long print_context_stack(struct thread_info *tinfo,
                                unsigned long *stack, unsigned long bp,
                                const struct stacktrace_ops *ops, void *data,
                                unsigned long *end)
{
        struct stack_frame *frame = (struct stack_frame *)bp;

        while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
                unsigned long addr;

                addr = *stack;
                if (__kernel_text_address(addr)) {
                        if ((unsigned long) stack == bp + 8) {
                                ops->address(data, addr, 1);
                                frame = frame->next_frame;
                                bp = (unsigned long) frame;
                        } else {
                                ops->address(data, addr, bp == 0);
                        }
                }
                stack++;
        }
        return bp;
}
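
/*
 * dump_trace() is the generic stack walker behind show_trace() and any
 * other stacktrace_ops user: it prints entries from any IST exception
 * stacks first, hops to the per-CPU IRQ stack if the trace passes
 * through it, and finally finishes on the process stack, following the
 * link pointers stored at the end of each stack.
 */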
void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
                unsigned long *stack, unsigned long bp,
                const struct stacktrace_ops *ops, void *data)
{
        const unsigned cpu = get_cpu();
        unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr;
        unsigned used = 0;
        struct thread_info *tinfo;

        if (!tsk)
                tsk = current;
        tinfo = task_thread_info(tsk);

        if (!stack) {
                unsigned long dummy;
                stack = &dummy;
                if (tsk && tsk != current)
                        stack = (unsigned long *)tsk->thread.sp;
        }

#ifdef CONFIG_FRAME_POINTER
        if (!bp) {
                if (tsk == current) {
                        /* Grab bp right from our regs */
                        asm("movq %%rbp, %0" : "=r" (bp):);
                } else {
                        /* bp is the last reg pushed by switch_to */
                        bp = *(unsigned long *) tsk->thread.sp;
                }
        }
#endif

        /*
         * Print function call entries in all stacks, starting at the
         * current stack address. If the stacks consist of nested
         * exceptions, each one is printed before moving on to the
         * stack it interrupted:
         */
        for (;;) {
                char *id;
                unsigned long *estack_end;

                estack_end = in_exception_stack(cpu, (unsigned long)stack,
                                                &used, &id);
                if (estack_end) {
                        if (ops->stack(data, id) < 0)
                                break;
                        bp = print_context_stack(tinfo, stack, bp, ops,
                                                 data, estack_end);
                        ops->stack(data, "<EOE>");
                        /*
                         * We link to the next stack via the
                         * second-to-last pointer (index -2 to end) in the
                         * exception stack:
                         */
                        stack = (unsigned long *) estack_end[-2];
                        continue;
                }
                if (irqstack_end) {
                        unsigned long *irqstack;
                        irqstack = irqstack_end -
                                (IRQSTACKSIZE - 64) / sizeof(*irqstack);

                        if (stack >= irqstack && stack < irqstack_end) {
                                if (ops->stack(data, "IRQ") < 0)
                                        break;
                                bp = print_context_stack(tinfo, stack, bp,
                                                         ops, data, irqstack_end);
                                /*
                                 * We link to the next stack (which would be
                                 * the process stack normally) the last
                                 * pointer (index -1 to end) in the IRQ stack:
                                 */
                                stack = (unsigned long *) (irqstack_end[-1]);
                                irqstack_end = NULL;
                                ops->stack(data, "EOI");
                                continue;
                        }
                }
                break;
        }

        /*
         * This handles the process stack:
         */
        bp = print_context_stack(tinfo, stack, bp, ops, data, NULL);
        put_cpu();
}
EXPORT_SYMBOL(dump_trace);

static void
print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
        print_symbol(msg, symbol);
        printk("\n");
}

static void print_trace_warning(void *data, char *msg)
{
        printk("%s\n", msg);
}

static int print_trace_stack(void *data, char *name)
{
        printk(" <%s> ", name);
        return 0;
}

static void print_trace_address(void *data, unsigned long addr, int reliable)
{
        touch_nmi_watchdog();
        printk_address(addr, reliable);
}

static const struct stacktrace_ops print_trace_ops = {
        .warning = print_trace_warning,
        .warning_symbol = print_trace_warning_symbol,
        .stack = print_trace_stack,
        .address = print_trace_address,
};

void
show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long *stack,
           unsigned long bp)
{
        printk("\nCall Trace:\n");
        dump_trace(tsk, regs, stack, bp, &print_trace_ops, NULL);
        printk("\n");
}

static void
_show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long *sp,
            unsigned long bp)
{
        unsigned long *stack;
        int i;
        const int cpu = smp_processor_id();
        unsigned long *irqstack_end = (unsigned long *) (cpu_pda(cpu)->irqstackptr);
        unsigned long *irqstack = (unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE);

        /*
         * Debugging aid: "show_stack(NULL, NULL);" prints the
         * back trace for this cpu.
         */
        if (sp == NULL) {
                if (tsk)
                        sp = (unsigned long *)tsk->thread.sp;
                else
                        sp = (unsigned long *)&sp;
        }

        stack = sp;
        for (i = 0; i < kstack_depth_to_print; i++) {
                if (stack >= irqstack && stack <= irqstack_end) {
                        if (stack == irqstack_end) {
                                stack = (unsigned long *) (irqstack_end[-1]);
                                printk(" <EOI> ");
                        }
                } else {
                        if (((long) stack & (THREAD_SIZE - 1)) == 0)
                                break;
                }
                if (i && ((i % 4) == 0))
                        printk("\n");
                printk(" %016lx", *stack++);
                touch_nmi_watchdog();
        }
        show_trace(tsk, regs, sp, bp);
}

void show_stack(struct task_struct *tsk, unsigned long *sp)
{
        _show_stack(tsk, NULL, sp, 0);
}

/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
        unsigned long dummy;
        unsigned long bp = 0;

#ifdef CONFIG_FRAME_POINTER
        if (!bp)
                asm("movq %%rbp, %0" : "=r" (bp):);
#endif

        printk("Pid: %d, comm: %.20s %s %s %.*s\n",
                current->pid, current->comm, print_tainted(),
                init_utsname()->release,
                (int)strcspn(init_utsname()->version, " "),
                init_utsname()->version);
        show_trace(NULL, NULL, &dummy, bp);
}
EXPORT_SYMBOL(dump_stack);
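
/*
 * show_registers() prints the register dump and the current process,
 * and, for faults taken in kernel mode, the raw kernel stack and the
 * code bytes around the faulting RIP.  The size of the code dump is
 * controlled by the code_bytes= boot parameter handled at the bottom
 * of this file.
 */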
void show_registers(struct pt_regs *regs)
{
        int i;
        unsigned long sp;
        const int cpu = smp_processor_id();
        struct task_struct *cur = cpu_pda(cpu)->pcurrent;
        u8 *ip;
        unsigned int code_prologue = code_bytes * 43 / 64;
        unsigned int code_len = code_bytes;

        sp = regs->sp;
        ip = (u8 *) regs->ip - code_prologue;
        printk("CPU %d ", cpu);
        __show_regs(regs);
        printk("Process %s (pid: %d, threadinfo %p, task %p)\n",
                cur->comm, cur->pid, task_thread_info(cur), cur);

        /*
         * When in-kernel, we also print out the stack and code at the
         * time of the fault..
         */
        if (!user_mode(regs)) {
                unsigned char c;
                printk("Stack: ");
                _show_stack(NULL, regs, (unsigned long *)sp, regs->bp);
                printk("\n");

                printk(KERN_EMERG "Code: ");
                if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
                        /* try starting at RIP */
                        ip = (u8 *) regs->ip;
                        code_len = code_len - code_prologue + 1;
                }
                for (i = 0; i < code_len; i++, ip++) {
                        if (ip < (u8 *)PAGE_OFFSET ||
                            probe_kernel_address(ip, c)) {
                                printk(" Bad RIP value.");
                                break;
                        }
                        if (ip == (u8 *)regs->ip)
                                printk("<%02x> ", c);
                        else
                                printk("%02x ", c);
                }
        }
        printk("\n");
}

int is_valid_bugaddr(unsigned long ip)
{
        unsigned short ud2;

        if (__copy_from_user(&ud2, (const void __user *) ip, sizeof(ud2)))
                return 0;

        return ud2 == 0x0b0f;
}
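
/*
 * Oops handling: die_lock serializes oops output across CPUs so that
 * concurrent crashes do not interleave their reports.  oops_begin()
 * and oops_end() bracket every die(); the nest count lets a CPU that
 * oopses again while already holding the lock keep going instead of
 * deadlocking.
 */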
static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;

unsigned __kprobes long oops_begin(void)
{
        int cpu;
        unsigned long flags;

        oops_enter();

        /* racy, but better than risking deadlock. */
        raw_local_irq_save(flags);
        cpu = smp_processor_id();
        if (!__raw_spin_trylock(&die_lock)) {
                if (cpu == die_owner)
                        /* nested oops. should stop eventually */;
                else
                        __raw_spin_lock(&die_lock);
        }
        die_nest_count++;
        die_owner = cpu;
        console_verbose();
        bust_spinlocks(1);
        return flags;
}

void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
        die_owner = -1;
        bust_spinlocks(0);
        die_nest_count--;
        if (!die_nest_count)
                /* Nest count reaches zero, release the lock. */
                __raw_spin_unlock(&die_lock);
        raw_local_irq_restore(flags);
        if (!regs) {
                oops_exit();
                return;
        }
        if (panic_on_oops)
                panic("Fatal exception");
        oops_exit();
        do_exit(signr);
}

int __kprobes __die(const char *str, struct pt_regs *regs, long err)
{
        static int die_counter;

        printk(KERN_EMERG "%s: %04lx [%u] ", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
        printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
        printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
        printk("DEBUG_PAGEALLOC");
#endif
        printk("\n");
        if (notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV) == NOTIFY_STOP)
                return 1;
        show_registers(regs);
        add_taint(TAINT_DIE);
        /* Executive summary in case the oops scrolled away */
        printk(KERN_ALERT "RIP ");
        printk_address(regs->ip, 1);
        printk(" RSP <%016lx>\n", regs->sp);
        if (kexec_should_crash(current))
                crash_kexec(regs);
        return 0;
}

void die(const char *str, struct pt_regs *regs, long err)
{
        unsigned long flags = oops_begin();

        if (!user_mode(regs))
                report_bug(regs->ip, regs);

        if (__die(str, regs, err))
                regs = NULL;
        oops_end(flags, regs, SIGSEGV);
}

void __kprobes die_nmi(char *str, struct pt_regs *regs, int do_panic)
{
        unsigned long flags = oops_begin();

        /*
         * We are in trouble anyway, let's at least try
         * to get a message out.
         */
        printk(str, smp_processor_id());
        show_registers(regs);
        if (kexec_should_crash(current))
                crash_kexec(regs);
        if (do_panic || panic_on_oops)
                panic("Non maskable interrupt");
        oops_end(flags, NULL, SIGBUS);
        nmi_exit();
        local_irq_enable();
        do_exit(SIGBUS);
}
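
/*
 * do_trap() is the common handler behind the DO_ERROR* trap entries:
 * for user-mode faults it records the trap number and error code in
 * the task and sends the appropriate signal; for kernel faults it
 * first tries an exception-table fixup and only dies if none applies.
 */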
static void __kprobes do_trap(int trapnr, int signr, char *str,
                              struct pt_regs *regs, long error_code,
                              siginfo_t *info)
{
        struct task_struct *tsk = current;

        if (user_mode(regs)) {
                /*
                 * We want error_code and trap_no set for userspace
                 * faults and kernelspace faults which result in
                 * die(), but not kernelspace faults which are fixed
                 * up. die() gives the process no chance to handle
                 * the signal and notice the kernel fault information,
                 * so that won't result in polluting the information
                 * about previously queued, but not yet delivered,
                 * faults. See also do_general_protection below.
                 */
                tsk->thread.error_code = error_code;
                tsk->thread.trap_no = trapnr;

                if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
                    printk_ratelimit()) {
                        printk(KERN_INFO
                               "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
                               tsk->comm, tsk->pid, str,
                               regs->ip, regs->sp, error_code);
                        print_vma_addr(" in ", regs->ip);
                        printk("\n");
                }

                if (info)
                        force_sig_info(signr, info, tsk);
                else
                        force_sig(signr, tsk);
                return;
        }

        if (!fixup_exception(regs)) {
                tsk->thread.error_code = error_code;
                tsk->thread.trap_no = trapnr;
                die(str, regs, error_code);
        }
        return;
}
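
/*
 * The DO_ERROR() and DO_ERROR_INFO() macros generate the simple trap
 * handlers.  Both give debuggers a chance via notify_die() and then
 * call do_trap(); DO_ERROR_INFO() additionally fills in a siginfo with
 * the si_code and fault address to deliver along with the signal.
 */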
#define DO_ERROR(trapnr, signr, str, name) \
asmlinkage void do_##name(struct pt_regs *regs, long error_code) \
{ \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
                                                        == NOTIFY_STOP) \
                return; \
        conditional_sti(regs); \
        do_trap(trapnr, signr, str, regs, error_code, NULL); \
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
asmlinkage void do_##name(struct pt_regs *regs, long error_code) \
{ \
        siginfo_t info; \
        info.si_signo = signr; \
        info.si_errno = 0; \
        info.si_code = sicode; \
        info.si_addr = (void __user *)siaddr; \
        trace_hardirqs_fixup(); \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
                                                        == NOTIFY_STOP) \
                return; \
        conditional_sti(regs); \
        do_trap(trapnr, signr, str, regs, error_code, &info); \
}

DO_ERROR_INFO( 0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip)
DO_ERROR( 4, SIGSEGV, "overflow", overflow)
DO_ERROR( 5, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO( 6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip)
DO_ERROR( 7, SIGSEGV, "device not available", device_not_available)
DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
DO_ERROR(18, SIGSEGV, "reserved", reserved)

/* Runs on IST stack */
asmlinkage void do_stack_segment(struct pt_regs *regs, long error_code)
{
        if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
                       12, SIGBUS) == NOTIFY_STOP)
                return;
        preempt_conditional_sti(regs);
        do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL);
        preempt_conditional_cli(regs);
}

asmlinkage void do_double_fault(struct pt_regs *regs, long error_code)
{
        static const char str[] = "double fault";
        struct task_struct *tsk = current;

        /* Return not checked because a double fault cannot be ignored */
        notify_die(DIE_TRAP, str, regs, error_code, 8, SIGSEGV);

        tsk->thread.error_code = error_code;
        tsk->thread.trap_no = 8;

        /* This is always a kernel trap and never fixable (and thus must
           never return). */
        for (;;)
                die(str, regs, error_code);
}

asmlinkage void __kprobes do_general_protection(struct pt_regs *regs,
                                                long error_code)
{
        struct task_struct *tsk = current;

        conditional_sti(regs);

        if (user_mode(regs)) {
                tsk->thread.error_code = error_code;
                tsk->thread.trap_no = 13;

                if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
                    printk_ratelimit()) {
                        printk(KERN_INFO
                               "%s[%d] general protection ip:%lx sp:%lx error:%lx",
                               tsk->comm, tsk->pid,
                               regs->ip, regs->sp, error_code);
                        print_vma_addr(" in ", regs->ip);
                        printk("\n");
                }

                force_sig(SIGSEGV, tsk);
                return;
        }

        if (fixup_exception(regs))
                return;

        tsk->thread.error_code = error_code;
        tsk->thread.trap_no = 13;
        if (notify_die(DIE_GPF, "general protection fault", regs,
                       error_code, 13, SIGSEGV) == NOTIFY_STOP)
                return;
        die("general protection fault", regs, error_code);
}

static __kprobes void
mem_parity_error(unsigned char reason, struct pt_regs *regs)
{
        printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n",
                reason);
        printk(KERN_EMERG "You have some hardware problem, likely on the PCI bus.\n");

#if defined(CONFIG_EDAC)
        if (edac_handler_set()) {
                edac_atomic_assert_error();
                return;
        }
#endif

        if (panic_on_unrecovered_nmi)
                panic("NMI: Not continuing");

        printk(KERN_EMERG "Dazed and confused, but trying to continue\n");

        /* Clear and disable the memory parity error line. */
        reason = (reason & 0xf) | 4;
        outb(reason, 0x61);
}

static __kprobes void
io_check_error(unsigned char reason, struct pt_regs *regs)
{
        printk("NMI: IOCK error (debug interrupt?)\n");
        show_registers(regs);

        /* Re-enable the IOCK line, wait for a few seconds */
        reason = (reason & 0xf) | 8;
        outb(reason, 0x61);
        mdelay(2000);
        reason &= ~8;
        outb(reason, 0x61);
}

static __kprobes void
unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
{
        printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n",
                reason);
        printk(KERN_EMERG "Do you have a strange power saving mode enabled?\n");
        if (panic_on_unrecovered_nmi)
                panic("NMI: Not continuing");
        printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
}

/* Runs on IST stack. This code must keep interrupts off all the time.
   Nested NMIs are prevented by the CPU. */
asmlinkage __kprobes void default_do_nmi(struct pt_regs *regs)
{
        unsigned char reason = 0;
        int cpu;

        cpu = smp_processor_id();

        /* Only the BSP gets external NMIs from the system. */
        if (!cpu)
                reason = get_nmi_reason();

        if (!(reason & 0xc0)) {
                if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
                    == NOTIFY_STOP)
                        return;
                /*
                 * Ok, so this is none of the documented NMI sources,
                 * so it must be the NMI watchdog.
                 */
                if (nmi_watchdog_tick(regs, reason))
                        return;
                if (!do_nmi_callback(regs, cpu))
                        unknown_nmi_error(reason, regs);

                return;
        }
        if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
                return;

        /* AK: following checks seem to be broken on modern chipsets. FIXME */
        if (reason & 0x80)
                mem_parity_error(reason, regs);
        if (reason & 0x40)
                io_check_error(reason, regs);
}

/* runs on IST stack. */
asmlinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
{
        trace_hardirqs_fixup();

        if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) == NOTIFY_STOP) {
                return;
        }
        preempt_conditional_sti(regs);
        do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
        preempt_conditional_cli(regs);
}

/* Help handler running on IST stack to switch back to user stack
   for scheduling or signal handling. The actual stack switch is done in
   entry.S */
asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
{
        struct pt_regs *regs = eregs;
        /* Did already sync */
        if (eregs == (struct pt_regs *)eregs->sp)
                ;
        /* Exception from user space */
        else if (user_mode(eregs))
                regs = task_pt_regs(current);
        /* Exception from kernel and interrupts are enabled. Move to
           kernel process stack. */
        else if (eregs->flags & X86_EFLAGS_IF)
                regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs));
        if (eregs != regs)
                *regs = *eregs;
        return regs;
}

/* runs on IST stack. */
asmlinkage void __kprobes do_debug(struct pt_regs *regs,
                                   unsigned long error_code)
{
        unsigned long condition;
        struct task_struct *tsk = current;
        siginfo_t info;

        trace_hardirqs_fixup();

        get_debugreg(condition, 6);

        /*
         * The processor cleared BTF, so don't mark that we need it set.
         */
        clear_tsk_thread_flag(tsk, TIF_DEBUGCTLMSR);
        tsk->thread.debugctlmsr = 0;

        if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
                       SIGTRAP) == NOTIFY_STOP)
                return;

        preempt_conditional_sti(regs);

        /* Mask out spurious debug traps due to lazy DR7 setting */
        if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
                if (!tsk->thread.debugreg7) {
                        goto clear_dr7;
                }
        }

        tsk->thread.debugreg6 = condition;

        /*
         * Single-stepping through TF: make sure we ignore any events in
         * kernel space (but re-enable TF when returning to user mode).
         */
        if (condition & DR_STEP) {
                if (!user_mode(regs))
                        goto clear_TF_reenable;
        }

        /* Ok, finally something we can handle */
        tsk->thread.trap_no = 1;
        tsk->thread.error_code = error_code;
        info.si_signo = SIGTRAP;
        info.si_errno = 0;
        info.si_code = TRAP_BRKPT;
        info.si_addr = user_mode(regs) ? (void __user *)regs->ip : NULL;
        force_sig_info(SIGTRAP, &info, tsk);

clear_dr7:
        set_debugreg(0UL, 7);
        preempt_conditional_cli(regs);
        return;

clear_TF_reenable:
        set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
        regs->flags &= ~X86_EFLAGS_TF;
        preempt_conditional_cli(regs);
}
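
/*
 * kernel_math_error() handles FPU exceptions raised in kernel mode:
 * if an exception-table fixup applies, the fault is swallowed and the
 * caller returns; otherwise notify_die() is called and the kernel dies.
 */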
static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr)
{
        if (fixup_exception(regs))
                return 1;

        notify_die(DIE_GPF, str, regs, 0, trapnr, SIGFPE);
        /* Illegal floating point operation in the kernel */
        current->thread.trap_no = trapnr;
        die(str, regs, 0);
        return 0;
}

/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
asmlinkage void do_coprocessor_error(struct pt_regs *regs)
{
        void __user *ip = (void __user *)(regs->ip);
        struct task_struct *task;
        siginfo_t info;
        unsigned short cwd, swd;

        conditional_sti(regs);
        if (!user_mode(regs) &&
            kernel_math_error(regs, "kernel x87 math error", 16))
                return;

        /*
         * Save the info for the exception handler and clear the error.
         */
        task = current;
        save_init_fpu(task);
        task->thread.trap_no = 16;
        task->thread.error_code = 0;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_code = __SI_FAULT;
        info.si_addr = ip;
        /*
         * (~cwd & swd) will mask out exceptions that are not set to unmasked
         * status. 0x3f is the exception bits in these regs, 0x200 is the
         * C1 reg you need in case of a stack fault, 0x040 is the stack
         * fault bit. We should only be taking one exception at a time,
         * so if this combination doesn't produce any single exception,
         * then we have a bad program that isn't synchronizing its FPU usage
         * and it will suffer the consequences since we won't be able to
         * fully reproduce the context of the exception
         */
        cwd = get_fpu_cwd(task);
        swd = get_fpu_swd(task);
        switch (swd & ~cwd & 0x3f) {
        case 0x000:
        default:
                break;
        case 0x001: /* Invalid Op */
                /*
                 * swd & 0x240 == 0x040: Stack Underflow
                 * swd & 0x240 == 0x240: Stack Overflow
                 * User must clear the SF bit (0x40) if set
                 */
                info.si_code = FPE_FLTINV;
                break;
        case 0x002: /* Denormalize */
        case 0x010: /* Underflow */
                info.si_code = FPE_FLTUND;
                break;
        case 0x004: /* Zero Divide */
                info.si_code = FPE_FLTDIV;
                break;
        case 0x008: /* Overflow */
                info.si_code = FPE_FLTOVF;
                break;
        case 0x020: /* Precision */
                info.si_code = FPE_FLTRES;
                break;
        }
        force_sig_info(SIGFPE, &info, task);
}

asmlinkage void bad_intr(void)
{
        printk("bad interrupt");
}
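
/*
 * SIMD (SSE) exceptions: unlike the x87 case there is only a single
 * MXCSR status/control register, so the handler masks the status bits
 * with the corresponding mask bits before picking the si_code for the
 * SIGFPE it delivers.
 */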
asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
{
        void __user *ip = (void __user *)(regs->ip);
        struct task_struct *task;
        siginfo_t info;
        unsigned short mxcsr;

        conditional_sti(regs);
        if (!user_mode(regs) &&
            kernel_math_error(regs, "kernel simd math error", 19))
                return;

        /*
         * Save the info for the exception handler and clear the error.
         */
        task = current;
        save_init_fpu(task);
        task->thread.trap_no = 19;
        task->thread.error_code = 0;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_code = __SI_FAULT;
        info.si_addr = ip;
        /*
         * The SIMD FPU exceptions are handled a little differently, as there
         * is only a single status/control register. Thus, to determine which
         * unmasked exception was caught we must mask the exception mask bits
         * at 0x1f80, and then use these to mask the exception bits at 0x3f.
         */
        mxcsr = get_fpu_mxcsr(task);
        switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
        case 0x000:
        default:
                break;
        case 0x001: /* Invalid Op */
                info.si_code = FPE_FLTINV;
                break;
        case 0x002: /* Denormalize */
        case 0x010: /* Underflow */
                info.si_code = FPE_FLTUND;
                break;
        case 0x004: /* Zero Divide */
                info.si_code = FPE_FLTDIV;
                break;
        case 0x008: /* Overflow */
                info.si_code = FPE_FLTOVF;
                break;
        case 0x020: /* Precision */
                info.si_code = FPE_FLTRES;
                break;
        }
        force_sig_info(SIGFPE, &info, task);
}

asmlinkage void do_spurious_interrupt_bug(struct pt_regs *regs)
{
}

asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
{
}

asmlinkage void __attribute__((weak)) mce_threshold_interrupt(void)
{
}

/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 */
asmlinkage void math_state_restore(void)
{
        struct task_struct *me = current;

        clts();                 /* Allow maths ops (or we recurse) */
        if (!used_math())
                init_fpu(me);
        restore_fpu_checking(&me->thread.i387.fxsave);
        task_thread_info(me)->status |= TS_USEDFPU;
        me->fpu_counter++;
}
EXPORT_SYMBOL_GPL(math_state_restore);
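
/*
 * trap_init() installs the IDT entries at boot.  Handlers that must
 * run on a known-good stack even when the kernel stack is unusable
 * (debug, NMI, double fault, stack fault, machine check) are given an
 * IST index; int3 and overflow are system gates so userspace may raise
 * them directly.
 */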
void __init trap_init(void)
{
        set_intr_gate(0, &divide_error);
        set_intr_gate_ist(1, &debug, DEBUG_STACK);
        set_intr_gate_ist(2, &nmi, NMI_STACK);
        set_system_gate_ist(3, &int3, DEBUG_STACK); /* int3 can be called from all */
        set_system_gate(4, &overflow); /* int4 can be called from all */
        set_intr_gate(5, &bounds);
        set_intr_gate(6, &invalid_op);
        set_intr_gate(7, &device_not_available);
        set_intr_gate_ist(8, &double_fault, DOUBLEFAULT_STACK);
        set_intr_gate(9, &coprocessor_segment_overrun);
        set_intr_gate(10, &invalid_TSS);
        set_intr_gate(11, &segment_not_present);
        set_intr_gate_ist(12, &stack_segment, STACKFAULT_STACK);
        set_intr_gate(13, &general_protection);
        set_intr_gate(14, &page_fault);
        set_intr_gate(15, &spurious_interrupt_bug);
        set_intr_gate(16, &coprocessor_error);
        set_intr_gate(17, &alignment_check);
#ifdef CONFIG_X86_MCE
        set_intr_gate_ist(18, &machine_check, MCE_STACK);
#endif
        set_intr_gate(19, &simd_coprocessor_error);

#ifdef CONFIG_IA32_EMULATION
        set_system_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
#endif

        /*
         * Should be a barrier for any external CPU state.
         */
        cpu_init();
}

static int __init oops_setup(char *s)
{
        if (!s)
                return -EINVAL;
        if (!strcmp(s, "panic"))
                panic_on_oops = 1;
        return 0;
}
early_param("oops", oops_setup);

static int __init kstack_setup(char *s)
{
        if (!s)
                return -EINVAL;
        kstack_depth_to_print = simple_strtoul(s, NULL, 0);
        return 0;
}
early_param("kstack", kstack_setup);

static int __init code_bytes_setup(char *s)
{
        code_bytes = simple_strtoul(s, NULL, 0);
        if (code_bytes > 8192)
                code_bytes = 8192;

        return 1;
}
__setup("code_bytes=", code_bytes_setup);