traps_64.c
/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'entry.S'.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/nmi.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/unwind.h>
#include <linux/uaccess.h>
#include <linux/bug.h>
#include <linux/kdebug.h>
#include <linux/utsname.h>

#include <mach_traps.h>

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

#include <asm/system.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/debugreg.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/processor.h>
#include <asm/unwind.h>
#include <asm/smp.h>
#include <asm/pgalloc.h>
#include <asm/pda.h>
#include <asm/proto.h>
#include <asm/nmi.h>
#include <asm/stacktrace.h>

asmlinkage void divide_error(void);
asmlinkage void debug(void);
asmlinkage void nmi(void);
asmlinkage void int3(void);
asmlinkage void overflow(void);
asmlinkage void bounds(void);
asmlinkage void invalid_op(void);
asmlinkage void device_not_available(void);
asmlinkage void double_fault(void);
asmlinkage void coprocessor_segment_overrun(void);
asmlinkage void invalid_TSS(void);
asmlinkage void segment_not_present(void);
asmlinkage void stack_segment(void);
asmlinkage void general_protection(void);
asmlinkage void page_fault(void);
asmlinkage void coprocessor_error(void);
asmlinkage void simd_coprocessor_error(void);
asmlinkage void reserved(void);
asmlinkage void alignment_check(void);
asmlinkage void machine_check(void);
asmlinkage void spurious_interrupt_bug(void);

static unsigned int code_bytes = 64;

static inline void conditional_sti(struct pt_regs *regs)
{
        if (regs->flags & X86_EFLAGS_IF)
                local_irq_enable();
}

static inline void preempt_conditional_sti(struct pt_regs *regs)
{
        inc_preempt_count();
        if (regs->flags & X86_EFLAGS_IF)
                local_irq_enable();
}

static inline void preempt_conditional_cli(struct pt_regs *regs)
{
        if (regs->flags & X86_EFLAGS_IF)
                local_irq_disable();
        /* Make sure to not schedule here because we could be running
           on an exception stack. */
        dec_preempt_count();
}
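
/*
 * [editor's note] The helpers above re-enable interrupts only when the
 * interrupted context had IF set in its saved flags, so a trap handler
 * never turns interrupts on inside a region that had them disabled.
 * The preempt_* variants additionally bump the preempt count so we
 * cannot be scheduled away while possibly running on an exception stack.
 */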

int kstack_depth_to_print = 12;

void printk_address(unsigned long address, int reliable)
{
#ifdef CONFIG_KALLSYMS
        unsigned long offset = 0, symsize;
        const char *symname;
        char *modname;
        char *delim = ":";
        char namebuf[KSYM_NAME_LEN];
        char reliab[4] = "";

        symname = kallsyms_lookup(address, &symsize, &offset,
                                        &modname, namebuf);
        if (!symname) {
                printk(" [<%016lx>]\n", address);
                return;
        }
        if (!reliable)
                strcpy(reliab, "? ");
        if (!modname)
                modname = delim = "";
        printk(" [<%016lx>] %s%s%s%s%s+0x%lx/0x%lx\n",
                address, reliab, delim, modname, delim, symname, offset, symsize);
#else
        printk(" [<%016lx>]\n", address);
#endif
}
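
/*
 * [editor's note] With CONFIG_KALLSYMS the line above comes out roughly as
 *
 *      [<ffffffff88012345>] ? :ext3:ext3_sync_fs+0x12/0x40
 *
 * i.e. address, an optional "? " for unreliable entries, the module name
 * wrapped in ":" delimiters (empty for built-in symbols), then symbol,
 * offset and symbol size. (Sample values are illustrative only.)
 */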

static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
                                        unsigned *usedp, char **idp)
{
        static char ids[][8] = {
                [DEBUG_STACK - 1] = "#DB",
                [NMI_STACK - 1] = "NMI",
                [DOUBLEFAULT_STACK - 1] = "#DF",
                [STACKFAULT_STACK - 1] = "#SS",
                [MCE_STACK - 1] = "#MC",
#if DEBUG_STKSZ > EXCEPTION_STKSZ
                [N_EXCEPTION_STACKS ... N_EXCEPTION_STACKS + DEBUG_STKSZ / EXCEPTION_STKSZ - 2] = "#DB[?]"
#endif
        };
        unsigned k;

        /*
         * Iterate over all exception stacks, and figure out whether
         * 'stack' is in one of them:
         */
        for (k = 0; k < N_EXCEPTION_STACKS; k++) {
                unsigned long end = per_cpu(orig_ist, cpu).ist[k];
                /*
                 * Is 'stack' above this exception frame's end?
                 * If yes then skip to the next frame.
                 */
                if (stack >= end)
                        continue;
                /*
                 * Is 'stack' above this exception frame's start address?
                 * If yes then we found the right frame.
                 */
                if (stack >= end - EXCEPTION_STKSZ) {
                        /*
                         * Make sure we only iterate through an exception
                         * stack once. If it comes up a second time then
                         * something is wrong - just break out and
                         * return NULL:
                         */
                        if (*usedp & (1U << k))
                                break;
                        *usedp |= 1U << k;
                        *idp = ids[k];
                        return (unsigned long *)end;
                }
                /*
                 * If this is a debug stack, and if it has a larger size than
                 * the usual exception stacks, then 'stack' might still
                 * be within the lower portion of the debug stack:
                 */
#if DEBUG_STKSZ > EXCEPTION_STKSZ
                if (k == DEBUG_STACK - 1 && stack >= end - DEBUG_STKSZ) {
                        unsigned j = N_EXCEPTION_STACKS - 1;

                        /*
                         * Black magic. A large debug stack is composed of
                         * multiple exception stack entries, which we
                         * iterate through now. Don't look:
                         */
                        do {
                                ++j;
                                end -= EXCEPTION_STKSZ;
                                ids[j][4] = '1' + (j - N_EXCEPTION_STACKS);
                        } while (stack < end - EXCEPTION_STKSZ);
                        if (*usedp & (1U << j))
                                break;
                        *usedp |= 1U << j;
                        *idp = ids[j];
                        return (unsigned long *)end;
                }
#endif
        }
        return NULL;
}

#define MSG(txt) ops->warning(data, txt)

/*
 * x86-64 can have up to three kernel stacks:
 * process stack
 * interrupt stack
 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
 */
static inline int valid_stack_ptr(struct thread_info *tinfo,
                        void *p, unsigned int size, void *end)
{
        void *t = tinfo;
        if (end) {
                if (p < end && p >= (end - THREAD_SIZE))
                        return 1;
                else
                        return 0;
        }
        return p > t && p < t + THREAD_SIZE - size;
}

/* The form of the top of the frame on the stack */
struct stack_frame {
        struct stack_frame *next_frame;
        unsigned long return_address;
};
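
/*
 * [editor's note] This matches the x86-64 frame-pointer layout that
 * print_context_stack() below relies on:
 *
 *      ...                  <- higher addresses
 *      return address       <- %rbp + 8
 *      saved %rbp           <- %rbp  (next_frame: the caller's frame)
 *      locals               <- lower addresses
 *
 * so a word found at bp + 8 is a return address recovered via the frame
 * chain and is reported as "reliable".
 */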

static inline unsigned long print_context_stack(struct thread_info *tinfo,
                unsigned long *stack, unsigned long bp,
                const struct stacktrace_ops *ops, void *data,
                unsigned long *end)
{
        struct stack_frame *frame = (struct stack_frame *)bp;

        while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
                unsigned long addr;

                addr = *stack;
                if (__kernel_text_address(addr)) {
                        if ((unsigned long) stack == bp + 8) {
                                ops->address(data, addr, 1);
                                frame = frame->next_frame;
                                bp = (unsigned long) frame;
                        } else {
                                ops->address(data, addr, bp == 0);
                        }
                }
                stack++;
        }
        return bp;
}

void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
                unsigned long *stack, unsigned long bp,
                const struct stacktrace_ops *ops, void *data)
{
        const unsigned cpu = get_cpu();
        unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr;
        unsigned used = 0;
        struct thread_info *tinfo;

        if (!tsk)
                tsk = current;
        tinfo = task_thread_info(tsk);

        if (!stack) {
                unsigned long dummy;
                stack = &dummy;
                if (tsk && tsk != current)
                        stack = (unsigned long *)tsk->thread.sp;
        }

#ifdef CONFIG_FRAME_POINTER
        if (!bp) {
                if (tsk == current) {
                        /* Grab bp right from our regs */
                        asm("movq %%rbp, %0" : "=r" (bp) :);
                } else {
                        /* bp is the last reg pushed by switch_to */
                        bp = *(unsigned long *) tsk->thread.sp;
                }
        }
#endif

        /*
         * Print function call entries in all stacks, starting at the
         * current stack address. If the stacks are nested (an exception
         * arrived while we were on another stack), follow the link from
         * each stack to the next until we reach the process stack.
         */
        for (;;) {
                char *id;
                unsigned long *estack_end;

                estack_end = in_exception_stack(cpu, (unsigned long)stack,
                                                &used, &id);
                if (estack_end) {
                        if (ops->stack(data, id) < 0)
                                break;
                        bp = print_context_stack(tinfo, stack, bp, ops,
                                                data, estack_end);
                        ops->stack(data, "<EOE>");
                        /*
                         * We link to the next stack via the
                         * second-to-last pointer (index -2 to end) in the
                         * exception stack:
                         */
                        stack = (unsigned long *) estack_end[-2];
                        continue;
                }
                if (irqstack_end) {
                        unsigned long *irqstack;
                        irqstack = irqstack_end -
                                (IRQSTACKSIZE - 64) / sizeof(*irqstack);

                        if (stack >= irqstack && stack < irqstack_end) {
                                if (ops->stack(data, "IRQ") < 0)
                                        break;
                                bp = print_context_stack(tinfo, stack, bp,
                                                ops, data, irqstack_end);
                                /*
                                 * We link to the next stack (which would be
                                 * the process stack normally) via the last
                                 * pointer (index -1 to end) in the IRQ stack:
                                 */
                                stack = (unsigned long *) (irqstack_end[-1]);
                                irqstack_end = NULL;
                                ops->stack(data, "EOI");
                                continue;
                        }
                }
                break;
        }

        /*
         * This handles the process stack:
         */
        bp = print_context_stack(tinfo, stack, bp, ops, data, NULL);
        put_cpu();
}
EXPORT_SYMBOL(dump_trace);
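
/*
 * [editor's sketch, hypothetical - not part of this file] Since
 * dump_trace() is exported, a caller can walk a stack with its own
 * struct stacktrace_ops; only the four callbacks used here are needed.
 * Names prefixed my_ are invented for illustration:
 */
#if 0
static void my_warning(void *data, char *msg) { }
static void my_warning_symbol(void *data, char *msg, unsigned long symbol) { }

static int my_stack(void *data, char *name)
{
        return 0;               /* >= 0 means: keep walking this stack */
}

static void my_address(void *data, unsigned long addr, int reliable)
{
        unsigned long *count = data;
        (*count)++;             /* e.g. just count text addresses */
}

static const struct stacktrace_ops my_ops = {
        .warning        = my_warning,
        .warning_symbol = my_warning_symbol,
        .stack          = my_stack,
        .address        = my_address,
};

static unsigned long count_current_stack_entries(void)
{
        unsigned long count = 0;
        dump_trace(current, NULL, NULL, 0, &my_ops, &count);
        return count;
}
#endif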

static void
print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
        print_symbol(msg, symbol);
        printk("\n");
}

static void print_trace_warning(void *data, char *msg)
{
        printk("%s\n", msg);
}

static int print_trace_stack(void *data, char *name)
{
        printk(" <%s> ", name);
        return 0;
}

static void print_trace_address(void *data, unsigned long addr, int reliable)
{
        touch_nmi_watchdog();
        printk_address(addr, reliable);
}

static const struct stacktrace_ops print_trace_ops = {
        .warning = print_trace_warning,
        .warning_symbol = print_trace_warning_symbol,
        .stack = print_trace_stack,
        .address = print_trace_address,
};

void
show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long *stack,
                unsigned long bp)
{
        printk("\nCall Trace:\n");
        dump_trace(tsk, regs, stack, bp, &print_trace_ops, NULL);
        printk("\n");
}

static void
_show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long *sp,
                unsigned long bp)
{
        unsigned long *stack;
        int i;
        const int cpu = smp_processor_id();
        unsigned long *irqstack_end = (unsigned long *) (cpu_pda(cpu)->irqstackptr);
        unsigned long *irqstack = (unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE);

        /* debugging aid: "show_stack(NULL, NULL);" prints the
           back trace for this cpu. */
        if (sp == NULL) {
                if (tsk)
                        sp = (unsigned long *)tsk->thread.sp;
                else
                        sp = (unsigned long *)&sp;
        }

        stack = sp;
        for (i = 0; i < kstack_depth_to_print; i++) {
                if (stack >= irqstack && stack <= irqstack_end) {
                        if (stack == irqstack_end) {
                                stack = (unsigned long *) (irqstack_end[-1]);
                                printk(" <EOI> ");
                        }
                } else {
                        if (((long) stack & (THREAD_SIZE - 1)) == 0)
                                break;
                }
                if (i && ((i % 4) == 0))
                        printk("\n");
                printk(" %016lx", *stack++);
                touch_nmi_watchdog();
        }
        show_trace(tsk, regs, sp, bp);
}

void show_stack(struct task_struct *tsk, unsigned long *sp)
{
        _show_stack(tsk, NULL, sp, 0);
}

/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
        unsigned long dummy;
        unsigned long bp = 0;

#ifdef CONFIG_FRAME_POINTER
        if (!bp)
                asm("movq %%rbp, %0" : "=r" (bp) :);
#endif

        printk("Pid: %d, comm: %.20s %s %s %.*s\n",
                current->pid, current->comm, print_tainted(),
                init_utsname()->release,
                (int)strcspn(init_utsname()->version, " "),
                init_utsname()->version);
        show_trace(NULL, NULL, &dummy, bp);
}
EXPORT_SYMBOL(dump_stack);
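
/*
 * [editor's sketch, hypothetical] dump_stack() is the usual one-liner
 * for "how did we get here?" debugging from any kernel context:
 */
#if 0
        if (unlikely(saw_impossible_state))     /* invented condition */
                dump_stack();                   /* log the current backtrace */
#endif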

void show_registers(struct pt_regs *regs)
{
        int i;
        unsigned long sp;
        const int cpu = smp_processor_id();
        struct task_struct *cur = cpu_pda(cpu)->pcurrent;
        u8 *ip;
        unsigned int code_prologue = code_bytes * 43 / 64;
        unsigned int code_len = code_bytes;

        sp = regs->sp;
        ip = (u8 *) regs->ip - code_prologue;
        printk("CPU %d ", cpu);
        __show_regs(regs);
        printk("Process %s (pid: %d, threadinfo %p, task %p)\n",
                cur->comm, cur->pid, task_thread_info(cur), cur);

        /*
         * When in kernel mode, we also print out the stack and code at the
         * time of the fault.
         */
        if (!user_mode(regs)) {
                unsigned char c;
                printk("Stack: ");
                _show_stack(NULL, regs, (unsigned long *)sp, regs->bp);
                printk("\n");

                printk(KERN_EMERG "Code: ");
                if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
                        /* try starting at RIP */
                        ip = (u8 *) regs->ip;
                        code_len = code_len - code_prologue + 1;
                }
                for (i = 0; i < code_len; i++, ip++) {
                        if (ip < (u8 *)PAGE_OFFSET ||
                                        probe_kernel_address(ip, c)) {
                                printk(" Bad RIP value.");
                                break;
                        }
                        if (ip == (u8 *)regs->ip)
                                printk("<%02x> ", c);
                        else
                                printk("%02x ", c);
                }
        }
        printk("\n");
}

int is_valid_bugaddr(unsigned long ip)
{
        unsigned short ud2;

        if (__copy_from_user(&ud2, (const void __user *) ip, sizeof(ud2)))
                return 0;

        return ud2 == 0x0b0f;
}
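
/*
 * [editor's note] 0x0b0f is the two-byte UD2 opcode (0x0f 0x0b) as seen
 * through a little-endian 16-bit load; BUG() plants UD2 at the bug site,
 * so a trapping ip pointing at one is accepted as a valid BUG address.
 */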

static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;

unsigned __kprobes long oops_begin(void)
{
        int cpu;
        unsigned long flags;

        oops_enter();

        /* racy, but better than risking deadlock. */
        raw_local_irq_save(flags);
        cpu = smp_processor_id();
        if (!__raw_spin_trylock(&die_lock)) {
                if (cpu == die_owner)
                        /* nested oops. should stop eventually */;
                else
                        __raw_spin_lock(&die_lock);
        }
        die_nest_count++;
        die_owner = cpu;
        console_verbose();
        bust_spinlocks(1);
        return flags;
}

void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
        die_owner = -1;
        bust_spinlocks(0);
        die_nest_count--;
        if (!die_nest_count)
                /* Nest count reaches zero, release the lock. */
                __raw_spin_unlock(&die_lock);
        raw_local_irq_restore(flags);
        if (!regs) {
                oops_exit();
                return;
        }
        if (panic_on_oops)
                panic("Fatal exception");
        oops_exit();
        do_exit(signr);
}

int __kprobes __die(const char *str, struct pt_regs *regs, long err)
{
        static int die_counter;

        printk(KERN_EMERG "%s: %04lx [%u] ", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
        printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
        printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
        printk("DEBUG_PAGEALLOC");
#endif
        printk("\n");
        if (notify_die(DIE_OOPS, str, regs, err,
                        current->thread.trap_no, SIGSEGV) == NOTIFY_STOP)
                return 1;

        show_registers(regs);
        add_taint(TAINT_DIE);
        /* Executive summary in case the oops scrolled away */
        printk(KERN_ALERT "RIP ");
        printk_address(regs->ip, 1);
        printk(" RSP <%016lx>\n", regs->sp);
        if (kexec_should_crash(current))
                crash_kexec(regs);
        return 0;
}

void die(const char *str, struct pt_regs *regs, long err)
{
        unsigned long flags = oops_begin();

        if (!user_mode(regs))
                report_bug(regs->ip, regs);
        if (__die(str, regs, err))
                regs = NULL;
        oops_end(flags, regs, SIGSEGV);
}

void __kprobes die_nmi(char *str, struct pt_regs *regs, int do_panic)
{
        unsigned long flags = oops_begin();

        /*
         * We are in trouble anyway, let's at least try
         * to get a message out.
         */
        printk(str, smp_processor_id());
        show_registers(regs);
        if (kexec_should_crash(current))
                crash_kexec(regs);
        if (do_panic || panic_on_oops)
                panic("Non maskable interrupt");
        oops_end(flags, NULL, SIGBUS);
        nmi_exit();
        local_irq_enable();
        do_exit(SIGBUS);
}

static void __kprobes do_trap(int trapnr, int signr, char *str,
                        struct pt_regs *regs, long error_code,
                        siginfo_t *info)
{
        struct task_struct *tsk = current;

        if (user_mode(regs)) {
                /*
                 * We want error_code and trap_no set for userspace
                 * faults and kernelspace faults which result in
                 * die(), but not kernelspace faults which are fixed
                 * up. die() gives the process no chance to handle
                 * the signal and notice the kernel fault information,
                 * so that won't result in polluting the information
                 * about previously queued, but not yet delivered,
                 * faults. See also do_general_protection below.
                 */
                tsk->thread.error_code = error_code;
                tsk->thread.trap_no = trapnr;

                if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
                    printk_ratelimit()) {
                        printk(KERN_INFO
                                "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
                                tsk->comm, tsk->pid, str,
                                regs->ip, regs->sp, error_code);
                        print_vma_addr(" in ", regs->ip);
                        printk("\n");
                }

                if (info)
                        force_sig_info(signr, info, tsk);
                else
                        force_sig(signr, tsk);
                return;
        }

        if (!fixup_exception(regs)) {
                tsk->thread.error_code = error_code;
                tsk->thread.trap_no = trapnr;
                die(str, regs, error_code);
        }
        return;
}

#define DO_ERROR(trapnr, signr, str, name) \
asmlinkage void do_##name(struct pt_regs *regs, long error_code) \
{ \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
            == NOTIFY_STOP) \
                return; \
        conditional_sti(regs); \
        do_trap(trapnr, signr, str, regs, error_code, NULL); \
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
asmlinkage void do_##name(struct pt_regs *regs, long error_code) \
{ \
        siginfo_t info; \
        info.si_signo = signr; \
        info.si_errno = 0; \
        info.si_code = sicode; \
        info.si_addr = (void __user *)siaddr; \
        trace_hardirqs_fixup(); \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
            == NOTIFY_STOP) \
                return; \
        conditional_sti(regs); \
        do_trap(trapnr, signr, str, regs, error_code, &info); \
}

DO_ERROR_INFO( 0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip)
DO_ERROR( 4, SIGSEGV, "overflow", overflow)
DO_ERROR( 5, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO( 6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip)
DO_ERROR( 7, SIGSEGV, "device not available", device_not_available)
DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
DO_ERROR(18, SIGSEGV, "reserved", reserved)
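
/*
 * [editor's sketch] For reference, DO_ERROR(4, SIGSEGV, "overflow",
 * overflow) above expands to roughly this handler (whitespace tidied):
 */
#if 0
asmlinkage void do_overflow(struct pt_regs *regs, long error_code)
{
        if (notify_die(DIE_TRAP, "overflow", regs, error_code, 4, SIGSEGV)
            == NOTIFY_STOP)
                return;
        conditional_sti(regs);
        do_trap(4, SIGSEGV, "overflow", regs, error_code, NULL);
}
#endif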

/* Runs on IST stack */
asmlinkage void do_stack_segment(struct pt_regs *regs, long error_code)
{
        if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
                        12, SIGBUS) == NOTIFY_STOP)
                return;
        preempt_conditional_sti(regs);
        do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL);
        preempt_conditional_cli(regs);
}

asmlinkage void do_double_fault(struct pt_regs *regs, long error_code)
{
        static const char str[] = "double fault";
        struct task_struct *tsk = current;

        /* Return not checked because a double fault cannot be ignored */
        notify_die(DIE_TRAP, str, regs, error_code, 8, SIGSEGV);

        tsk->thread.error_code = error_code;
        tsk->thread.trap_no = 8;

        /* This is always a kernel trap and never fixable (and thus must
           never return). */
        for (;;)
                die(str, regs, error_code);
}

asmlinkage void __kprobes do_general_protection(struct pt_regs *regs,
                                                long error_code)
{
        struct task_struct *tsk = current;

        conditional_sti(regs);

        if (user_mode(regs)) {
                tsk->thread.error_code = error_code;
                tsk->thread.trap_no = 13;

                if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
                    printk_ratelimit()) {
                        printk(KERN_INFO
                                "%s[%d] general protection ip:%lx sp:%lx error:%lx",
                                tsk->comm, tsk->pid,
                                regs->ip, regs->sp, error_code);
                        print_vma_addr(" in ", regs->ip);
                        printk("\n");
                }

                force_sig(SIGSEGV, tsk);
                return;
        }

        if (fixup_exception(regs))
                return;

        tsk->thread.error_code = error_code;
        tsk->thread.trap_no = 13;
        if (notify_die(DIE_GPF, "general protection fault", regs,
                        error_code, 13, SIGSEGV) == NOTIFY_STOP)
                return;
        die("general protection fault", regs, error_code);
}

static __kprobes void
mem_parity_error(unsigned char reason, struct pt_regs *regs)
{
        printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n",
                reason);
        printk(KERN_EMERG "You have some hardware problem, likely on the PCI bus.\n");

#if defined(CONFIG_EDAC)
        if (edac_handler_set()) {
                edac_atomic_assert_error();
                return;
        }
#endif

        if (panic_on_unrecovered_nmi)
                panic("NMI: Not continuing");

        printk(KERN_EMERG "Dazed and confused, but trying to continue\n");

        /* Clear and disable the memory parity error line. */
        reason = (reason & 0xf) | 4;
        outb(reason, 0x61);
}

static __kprobes void
io_check_error(unsigned char reason, struct pt_regs *regs)
{
        printk("NMI: IOCK error (debug interrupt?)\n");
        show_registers(regs);

        /* Re-enable the IOCK line, wait for a few seconds */
        reason = (reason & 0xf) | 8;
        outb(reason, 0x61);
        mdelay(2000);
        reason &= ~8;
        outb(reason, 0x61);
}

static __kprobes void
unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
{
        printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n",
                reason);
        printk(KERN_EMERG "Do you have a strange power saving mode enabled?\n");
        if (panic_on_unrecovered_nmi)
                panic("NMI: Not continuing");
        printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
}

/* Runs on IST stack. This code must keep interrupts off all the time.
   Nested NMIs are prevented by the CPU. */
asmlinkage __kprobes void default_do_nmi(struct pt_regs *regs)
{
        unsigned char reason = 0;
        int cpu;

        cpu = smp_processor_id();

        /* Only the BSP gets external NMIs from the system. */
        if (!cpu)
                reason = get_nmi_reason();

        if (!(reason & 0xc0)) {
                if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
                    == NOTIFY_STOP)
                        return;
                /*
                 * Ok, so this is none of the documented NMI sources,
                 * so it must be the NMI watchdog.
                 */
                if (nmi_watchdog_tick(regs, reason))
                        return;
                if (!do_nmi_callback(regs, cpu))
                        unknown_nmi_error(reason, regs);

                return;
        }
        if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
                return;

        /* AK: following checks seem to be broken on modern chipsets. FIXME */
        if (reason & 0x80)
                mem_parity_error(reason, regs);
        if (reason & 0x40)
                io_check_error(reason, regs);
}

/* runs on IST stack. */
asmlinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
{
        trace_hardirqs_fixup();

        if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
            == NOTIFY_STOP)
                return;

        preempt_conditional_sti(regs);
        do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
        preempt_conditional_cli(regs);
}

/* Help handler running on IST stack to switch back to user stack
   for scheduling or signal handling. The actual stack switch is done in
   entry.S */
asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
{
        struct pt_regs *regs = eregs;
        /* Already synced */
        if (eregs == (struct pt_regs *)eregs->sp)
                ;
        /* Exception from user space */
        else if (user_mode(eregs))
                regs = task_pt_regs(current);
        /* Exception from kernel and interrupts are enabled. Move to
           kernel process stack. */
        else if (eregs->flags & X86_EFLAGS_IF)
                regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs));
        if (eregs != regs)
                *regs = *eregs;
        return regs;
}

/* runs on IST stack. */
asmlinkage void __kprobes do_debug(struct pt_regs *regs,
                                unsigned long error_code)
{
        unsigned long condition;
        struct task_struct *tsk = current;
        siginfo_t info;

        trace_hardirqs_fixup();

        get_debugreg(condition, 6);

        /*
         * The processor cleared BTF, so don't mark that we need it set.
         */
        clear_tsk_thread_flag(tsk, TIF_DEBUGCTLMSR);
        tsk->thread.debugctlmsr = 0;

        if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
                        SIGTRAP) == NOTIFY_STOP)
                return;

        preempt_conditional_sti(regs);

        /* Mask out spurious debug traps due to lazy DR7 setting */
        if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
                if (!tsk->thread.debugreg7)
                        goto clear_dr7;
        }

        tsk->thread.debugreg6 = condition;

        /*
         * Single-stepping through TF: make sure we ignore any events in
         * kernel space (but re-enable TF when returning to user mode).
         */
        if (condition & DR_STEP) {
                if (!user_mode(regs))
                        goto clear_TF_reenable;
        }

        /* Ok, finally something we can handle */
        tsk->thread.trap_no = 1;
        tsk->thread.error_code = error_code;
        info.si_signo = SIGTRAP;
        info.si_errno = 0;
        info.si_code = TRAP_BRKPT;
        info.si_addr = user_mode(regs) ? (void __user *)regs->ip : NULL;
        force_sig_info(SIGTRAP, &info, tsk);

clear_dr7:
        set_debugreg(0UL, 7);
        preempt_conditional_cli(regs);
        return;

clear_TF_reenable:
        set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
        regs->flags &= ~X86_EFLAGS_TF;
        preempt_conditional_cli(regs);
}

static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr)
{
        if (fixup_exception(regs))
                return 1;

        notify_die(DIE_GPF, str, regs, 0, trapnr, SIGFPE);
        /* Illegal floating point operation in the kernel */
        current->thread.trap_no = trapnr;
        die(str, regs, 0);
        return 0;
}

/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour.
 */
asmlinkage void do_coprocessor_error(struct pt_regs *regs)
{
        void __user *ip = (void __user *)(regs->ip);
        struct task_struct *task;
        siginfo_t info;
        unsigned short cwd, swd;

        conditional_sti(regs);
        if (!user_mode(regs) &&
            kernel_math_error(regs, "kernel x87 math error", 16))
                return;

        /*
         * Save the info for the exception handler and clear the error.
         */
        task = current;
        save_init_fpu(task);
        task->thread.trap_no = 16;
        task->thread.error_code = 0;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_code = __SI_FAULT;
        info.si_addr = ip;
        /*
         * (~cwd & swd) will mask out exceptions that are not set to unmasked
         * status. 0x3f is the exception bits in these regs, 0x200 is the
         * C1 flag you need in case of a stack fault, 0x040 is the stack
         * fault bit. We should only be taking one exception at a time,
         * so if this combination doesn't produce any single exception,
         * then we have a bad program that isn't synchronizing its FPU usage
         * and it will suffer the consequences since we won't be able to
         * fully reproduce the context of the exception.
         */
        cwd = get_fpu_cwd(task);
        swd = get_fpu_swd(task);
        switch (swd & ~cwd & 0x3f) {
        case 0x000:
        default:
                break;
        case 0x001: /* Invalid Op */
                /*
                 * swd & 0x240 == 0x040: Stack Underflow
                 * swd & 0x240 == 0x240: Stack Overflow
                 * User must clear the SF bit (0x40) if set
                 */
                info.si_code = FPE_FLTINV;
                break;
        case 0x002: /* Denormalize */
        case 0x010: /* Underflow */
                info.si_code = FPE_FLTUND;
                break;
        case 0x004: /* Zero Divide */
                info.si_code = FPE_FLTDIV;
                break;
        case 0x008: /* Overflow */
                info.si_code = FPE_FLTOVF;
                break;
        case 0x020: /* Precision */
                info.si_code = FPE_FLTRES;
                break;
        }
        force_sig_info(SIGFPE, &info, task);
}
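
/*
 * [editor's worked example] Suppose the control word masks everything
 * except invalid-op (cwd low bits = 0x3e) while both a zero-divide and
 * an invalid-op are flagged in the status word (swd low bits = 0x05).
 * Then
 *      swd & ~cwd & 0x3f  ==  0x05 & 0x01  ==  0x01
 * so only the unmasked invalid-op survives and si_code becomes
 * FPE_FLTINV. (Values chosen for illustration only.)
 */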

asmlinkage void bad_intr(void)
{
        printk("bad interrupt");
}

asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
{
        void __user *ip = (void __user *)(regs->ip);
        struct task_struct *task;
        siginfo_t info;
        unsigned short mxcsr;

        conditional_sti(regs);
        if (!user_mode(regs) &&
            kernel_math_error(regs, "kernel simd math error", 19))
                return;

        /*
         * Save the info for the exception handler and clear the error.
         */
        task = current;
        save_init_fpu(task);
        task->thread.trap_no = 19;
        task->thread.error_code = 0;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_code = __SI_FAULT;
        info.si_addr = ip;
        /*
         * The SIMD FPU exceptions are handled a little differently, as there
         * is only a single status/control register. Thus, to determine which
         * unmasked exception was caught we must mask the exception mask bits
         * at 0x1f80, and then use these to mask the exception bits at 0x3f.
         */
        mxcsr = get_fpu_mxcsr(task);
        switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
        case 0x000:
        default:
                break;
        case 0x001: /* Invalid Op */
                info.si_code = FPE_FLTINV;
                break;
        case 0x002: /* Denormalize */
        case 0x010: /* Underflow */
                info.si_code = FPE_FLTUND;
                break;
        case 0x004: /* Zero Divide */
                info.si_code = FPE_FLTDIV;
                break;
        case 0x008: /* Overflow */
                info.si_code = FPE_FLTOVF;
                break;
        case 0x020: /* Precision */
                info.si_code = FPE_FLTRES;
                break;
        }
        force_sig_info(SIGFPE, &info, task);
}
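
/*
 * [editor's worked example] MXCSR keeps each mask bit (in 0x1f80) seven
 * bit positions above its status bit (in 0x3f). Say everything is masked
 * except zero-divide (mask bits 0x1d80) and the ZE status bit 0x04 is
 * set, i.e. mxcsr = 0x1d84:
 *      (mxcsr & 0x1f80) >> 7 == 0x3b,   ~0x3b & (mxcsr & 0x3f) == 0x04
 * which selects the Zero Divide case and FPE_FLTDIV. (Illustrative
 * values, not taken from the source.)
 */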

asmlinkage void do_spurious_interrupt_bug(struct pt_regs *regs)
{
}

asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
{
}

asmlinkage void __attribute__((weak)) mce_threshold_interrupt(void)
{
}

/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 */
asmlinkage void math_state_restore(void)
{
        struct task_struct *me = current;

        clts();                 /* Allow maths ops (or we recurse) */
        if (!used_math())
                init_fpu(me);
        restore_fpu_checking(&me->thread.i387.fxsave);
        task_thread_info(me)->status |= TS_USEDFPU;
        me->fpu_counter++;
}
EXPORT_SYMBOL_GPL(math_state_restore);

void __init trap_init(void)
{
        set_intr_gate(0, &divide_error);
        set_intr_gate_ist(1, &debug, DEBUG_STACK);
        set_intr_gate_ist(2, &nmi, NMI_STACK);
        set_system_gate_ist(3, &int3, DEBUG_STACK); /* int3 can be called from all */
        set_system_gate(4, &overflow); /* int4 can be called from all */
        set_intr_gate(5, &bounds);
        set_intr_gate(6, &invalid_op);
        set_intr_gate(7, &device_not_available);
        set_intr_gate_ist(8, &double_fault, DOUBLEFAULT_STACK);
        set_intr_gate(9, &coprocessor_segment_overrun);
        set_intr_gate(10, &invalid_TSS);
        set_intr_gate(11, &segment_not_present);
        set_intr_gate_ist(12, &stack_segment, STACKFAULT_STACK);
        set_intr_gate(13, &general_protection);
        set_intr_gate(14, &page_fault);
        set_intr_gate(15, &spurious_interrupt_bug);
        set_intr_gate(16, &coprocessor_error);
        set_intr_gate(17, &alignment_check);
#ifdef CONFIG_X86_MCE
        set_intr_gate_ist(18, &machine_check, MCE_STACK);
#endif
        set_intr_gate(19, &simd_coprocessor_error);
#ifdef CONFIG_IA32_EMULATION
        set_system_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
#endif
        /*
         * Should be a barrier for any external CPU state.
         */
        cpu_init();
}

static int __init oops_setup(char *s)
{
        if (!s)
                return -EINVAL;
        if (!strcmp(s, "panic"))
                panic_on_oops = 1;
        return 0;
}
early_param("oops", oops_setup);

static int __init kstack_setup(char *s)
{
        if (!s)
                return -EINVAL;
        kstack_depth_to_print = simple_strtoul(s, NULL, 0);
        return 0;
}
early_param("kstack", kstack_setup);

static int __init code_bytes_setup(char *s)
{
        code_bytes = simple_strtoul(s, NULL, 0);
        if (code_bytes > 8192)
                code_bytes = 8192;
        return 1;
}
__setup("code_bytes=", code_bytes_setup);
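
/*
 * [editor's note] The three handlers above register kernel command-line
 * parameters. For example, booting with
 *
 *      oops=panic kstack=24 code_bytes=128
 *
 * panics on any oops, prints 24 raw stack words in backtraces, and dumps
 * 128 bytes of code around the faulting RIP in show_registers().
 */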