traps.c

/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *               Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *
 *  Derived from "arch/i386/kernel/traps.c"
 *    Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/reboot.h>
#include <linux/kprobes.h>
#include <linux/bug.h>
#include <linux/utsname.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <linux/atomic.h>
#include <asm/mathemu.h>
#include <asm/cpcmd.h>
#include <asm/lowcore.h>
#include <asm/debug.h>
#include <asm/ipl.h>
#include "entry.h"

int show_unhandled_signals = 1;

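/* Return the current kernel stack pointer, read from general register 15. */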
#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })

#ifndef CONFIG_64BIT
#define LONG "%08lx "
#define FOURLONG "%08lx %08lx %08lx %08lx\n"
static int kstack_depth_to_print = 12;
#else /* CONFIG_64BIT */
#define LONG "%016lx "
#define FOURLONG "%016lx %016lx %016lx %016lx\n"
static int kstack_depth_to_print = 20;
#endif /* CONFIG_64BIT */

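/*
 * Return the address of the instruction that caused the trap. The PSW address
 * points past the faulting instruction, so the instruction length (upper half
 * of int_code) is subtracted again. On 64-bit, a transactional-execution abort
 * (int_code bit 0x200) takes the address from the transaction diagnostic block.
 */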
static inline void __user *get_trap_ip(struct pt_regs *regs)
{
#ifdef CONFIG_64BIT
	unsigned long address;

	if (regs->int_code & 0x200)
		address = *(unsigned long *)(current->thread.trap_tdb + 24);
	else
		address = regs->psw.addr;
	return (void __user *)
		((address - (regs->int_code >> 16)) & PSW_ADDR_INSN);
#else
	return (void __user *)
		((regs->psw.addr - (regs->int_code >> 16)) & PSW_ADDR_INSN);
#endif
}

/*
 * For show_trace we have three different stacks to consider:
 *   - the panic stack which is used if the kernel stack has overflown
 *   - the asynchronous interrupt stack (cpu related)
 *   - the synchronous kernel stack (process related)
 * The stack trace can start at any of the three stacks and can potentially
 * touch all of them. The order is: panic stack, async stack, sync stack.
 */
static unsigned long
__show_trace(unsigned long sp, unsigned long low, unsigned long high)
{
	struct stack_frame *sf;
	struct pt_regs *regs;

	while (1) {
		sp = sp & PSW_ADDR_INSN;
		if (sp < low || sp > high - sizeof(*sf))
			return sp;
		sf = (struct stack_frame *) sp;
		printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
		print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN);
		/* Follow the backchain. */
		while (1) {
			low = sp;
			sp = sf->back_chain & PSW_ADDR_INSN;
			if (!sp)
				break;
			if (sp <= low || sp > high - sizeof(*sf))
				return sp;
			sf = (struct stack_frame *) sp;
			printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
			print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN);
		}
		/* Zero backchain detected, check for interrupt frame. */
		sp = (unsigned long) (sf + 1);
		if (sp <= low || sp > high - sizeof(*regs))
			return sp;
		regs = (struct pt_regs *) sp;
		printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN);
		print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN);
		low = sp;
		sp = regs->gprs[15];
	}
}

static void show_trace(struct task_struct *task, unsigned long *stack)
{
	register unsigned long __r15 asm ("15");
	unsigned long sp;

	sp = (unsigned long) stack;
	if (!sp)
		sp = task ? task->thread.ksp : __r15;
	printk("Call Trace:\n");
#ifdef CONFIG_CHECK_STACK
	sp = __show_trace(sp, S390_lowcore.panic_stack - 4096,
			  S390_lowcore.panic_stack);
#endif
	sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE,
			  S390_lowcore.async_stack);
	if (task)
		__show_trace(sp, (unsigned long) task_stack_page(task),
			     (unsigned long) task_stack_page(task) + THREAD_SIZE);
	else
		__show_trace(sp, S390_lowcore.thread_info,
			     S390_lowcore.thread_info + THREAD_SIZE);
	if (!task)
		task = current;
	debug_show_held_locks(task);
}

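/*
 * Dump up to kstack_depth_to_print raw stack words (stopping at the thread
 * boundary), then print the call trace for the given task or the current one.
 */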
void show_stack(struct task_struct *task, unsigned long *sp)
{
	register unsigned long *__r15 asm ("15");
	unsigned long *stack;
	int i;

	if (!sp)
		stack = task ? (unsigned long *) task->thread.ksp : __r15;
	else
		stack = sp;

	for (i = 0; i < kstack_depth_to_print; i++) {
		if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
			break;
		if ((i * sizeof(long) % 32) == 0)
			printk("%s ", i == 0 ? "" : "\n");
		printk(LONG, *stack++);
	}
	printk("\n");
	show_trace(task, sp);
}

static void show_last_breaking_event(struct pt_regs *regs)
{
#ifdef CONFIG_64BIT
	printk("Last Breaking-Event-Address:\n");
	printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN);
	print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN);
#endif
}

/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
	printk("CPU: %d %s %s %.*s\n",
	       task_thread_info(current)->cpu, print_tainted(),
	       init_utsname()->release,
	       (int)strcspn(init_utsname()->version, " "),
	       init_utsname()->version);
	printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
	       current->comm, current->pid, current,
	       (void *) current->thread.ksp);
	show_stack(NULL, NULL);
}
EXPORT_SYMBOL(dump_stack);

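/* Extract the field selected by @bits from the PSW mask and right-justify it. */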
static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
{
	return (regs->psw.mask & bits) / ((~bits + 1) & bits);
}

void show_registers(struct pt_regs *regs)
{
	char *mode;

	mode = user_mode(regs) ? "User" : "Krnl";
	printk("%s PSW : %p %p",
	       mode, (void *) regs->psw.mask,
	       (void *) regs->psw.addr);
	print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN);
	printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
	       "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER),
	       mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO),
	       mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY),
	       mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT),
	       mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC),
	       mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM));
#ifdef CONFIG_64BIT
	printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA));
#endif
	printk("\n%s GPRS: " FOURLONG, mode,
	       regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
	printk(" " FOURLONG,
	       regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
	printk(" " FOURLONG,
	       regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
	printk(" " FOURLONG,
	       regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);
	show_code(regs);
}

void show_regs(struct pt_regs *regs)
{
	printk("CPU: %d %s %s %.*s\n",
	       task_thread_info(current)->cpu, print_tainted(),
	       init_utsname()->release,
	       (int)strcspn(init_utsname()->version, " "),
	       init_utsname()->version);
	printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
	       current->comm, current->pid, current,
	       (void *) current->thread.ksp);
	show_registers(regs);
	/* Show stack backtrace if pt_regs is from kernel mode */
	if (!user_mode(regs))
		show_trace(NULL, (unsigned long *) regs->gprs[15]);
	show_last_breaking_event(regs);
}

static DEFINE_SPINLOCK(die_lock);

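/*
 * Print an oops report for a fatal kernel fault and terminate the current
 * task; die_lock serializes concurrent oopses from different CPUs.
 */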
void die(struct pt_regs *regs, const char *str)
{
	static int die_counter;

	oops_enter();
	lgr_info_log();
	debug_stop_all();
	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);
	printk("%s: %04x [#%d] ", str, regs->int_code & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("DEBUG_PAGEALLOC");
#endif
	printk("\n");
	notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV);
	print_modules();
	show_regs(regs);
	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	spin_unlock_irq(&die_lock);
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception: panic_on_oops");
	oops_exit();
	do_exit(SIGSEGV);
}

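/*
 * Log an unhandled user space fault, rate limited and only if
 * show_unhandled_signals is set (faults of pid 1 are always reported).
 */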
static inline void report_user_fault(struct pt_regs *regs, int signr)
{
	if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
		return;
	if (!unhandled_signal(current, signr))
		return;
	if (!printk_ratelimit())
		return;
	printk("User process fault: interruption code 0x%X ", regs->int_code);
	print_vma_addr("in ", regs->psw.addr & PSW_ADDR_INSN);
	printk("\n");
	show_regs(regs);
}

int is_valid_bugaddr(unsigned long addr)
{
	return 1;
}

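/*
 * Common trap handler: deliver a signal for faults in user mode; in kernel
 * mode try an exception table fixup or a WARN/BUG report, otherwise die().
 */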
static void __kprobes do_trap(struct pt_regs *regs,
			      int si_signo, int si_code, char *str)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, str, regs, 0,
		       regs->int_code, si_signo) == NOTIFY_STOP)
		return;

	if (user_mode(regs)) {
		info.si_signo = si_signo;
		info.si_errno = 0;
		info.si_code = si_code;
		info.si_addr = get_trap_ip(regs);
		force_sig_info(si_signo, &info, current);
		report_user_fault(regs, si_signo);
	} else {
		const struct exception_table_entry *fixup;
		fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
		if (fixup)
			regs->psw.addr = extable_fixup(fixup) | PSW_ADDR_AMODE;
		else {
			enum bug_trap_type btt;

			btt = report_bug(regs->psw.addr & PSW_ADDR_INSN, regs);
			if (btt == BUG_TRAP_TYPE_WARN)
				return;
			die(regs, str);
		}
	}
}

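/*
 * PER (program event recording) trap: report a hardware single-step or
 * breakpoint event to the ptracer via SIGTRAP/TRAP_HWBKPT.
 */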
void __kprobes do_per_trap(struct pt_regs *regs)
{
	siginfo_t info;

	if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, SIGTRAP) == NOTIFY_STOP)
		return;
	if (!current->ptrace)
		return;
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_HWBKPT;
	info.si_addr =
		(void __force __user *) current->thread.per_event.address;
	force_sig_info(SIGTRAP, &info, current);
}

void default_trap_handler(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		report_user_fault(regs, SIGSEGV);
		do_exit(SIGSEGV);
	} else
		die(regs, "Unknown program exception");
}

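/* Generate a trivial handler that simply forwards the trap to do_trap(). */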
#define DO_ERROR_INFO(name, signr, sicode, str) \
void name(struct pt_regs *regs)			\
{						\
	do_trap(regs, signr, sicode, str);	\
}

DO_ERROR_INFO(addressing_exception, SIGILL, ILL_ILLADR,
	      "addressing exception")
DO_ERROR_INFO(execute_exception, SIGILL, ILL_ILLOPN,
	      "execute exception")
DO_ERROR_INFO(divide_exception, SIGFPE, FPE_INTDIV,
	      "fixpoint divide exception")
DO_ERROR_INFO(overflow_exception, SIGFPE, FPE_INTOVF,
	      "fixpoint overflow exception")
DO_ERROR_INFO(hfp_overflow_exception, SIGFPE, FPE_FLTOVF,
	      "HFP overflow exception")
DO_ERROR_INFO(hfp_underflow_exception, SIGFPE, FPE_FLTUND,
	      "HFP underflow exception")
DO_ERROR_INFO(hfp_significance_exception, SIGFPE, FPE_FLTRES,
	      "HFP significance exception")
DO_ERROR_INFO(hfp_divide_exception, SIGFPE, FPE_FLTDIV,
	      "HFP divide exception")
DO_ERROR_INFO(hfp_sqrt_exception, SIGFPE, FPE_FLTINV,
	      "HFP square root exception")
DO_ERROR_INFO(operand_exception, SIGILL, ILL_ILLOPN,
	      "operand exception")
DO_ERROR_INFO(privileged_op, SIGILL, ILL_PRVOPC,
	      "privileged operation")
DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN,
	      "special operation exception")
DO_ERROR_INFO(translation_exception, SIGILL, ILL_ILLOPN,
	      "translation exception")

#ifdef CONFIG_64BIT
DO_ERROR_INFO(transaction_exception, SIGILL, ILL_ILLOPN,
	      "transaction constraint exception")
#endif

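/*
 * Translate the IEEE flags of the data exception code (DXC) in the FPC
 * register into a si_code and deliver SIGFPE through do_trap().
 */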
static inline void do_fp_trap(struct pt_regs *regs, int fpc)
{
	int si_code = 0;

	/* FPC[2] is Data Exception Code */
	if ((fpc & 0x00000300) == 0) {
		/* bits 6 and 7 of DXC are 0 iff IEEE exception */
		if (fpc & 0x8000) /* invalid fp operation */
			si_code = FPE_FLTINV;
		else if (fpc & 0x4000) /* div by 0 */
			si_code = FPE_FLTDIV;
		else if (fpc & 0x2000) /* overflow */
			si_code = FPE_FLTOVF;
		else if (fpc & 0x1000) /* underflow */
			si_code = FPE_FLTUND;
		else if (fpc & 0x0800) /* inexact */
			si_code = FPE_FLTRES;
	}
	do_trap(regs, SIGFPE, si_code, "floating point exception");
}

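/*
 * Illegal operation exception: handle the ptrace breakpoint instruction,
 * optionally emulate floating point opcodes (CONFIG_MATHEMU), give kprobes
 * a chance for kernel mode traps, otherwise send SIGILL.
 */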
void __kprobes illegal_op(struct pt_regs *regs)
{
	siginfo_t info;
	__u8 opcode[6];
	__u16 __user *location;
	int signal = 0;

	location = get_trap_ip(regs);

	if (user_mode(regs)) {
		if (get_user(*((__u16 *) opcode), (__u16 __user *) location))
			return;
		if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) {
			if (current->ptrace) {
				info.si_signo = SIGTRAP;
				info.si_errno = 0;
				info.si_code = TRAP_BRKPT;
				info.si_addr = location;
				force_sig_info(SIGTRAP, &info, current);
			} else
				signal = SIGILL;
#ifdef CONFIG_MATHEMU
		} else if (opcode[0] == 0xb3) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_b3(opcode, regs);
		} else if (opcode[0] == 0xed) {
			if (get_user(*((__u32 *) (opcode+2)),
				     (__u32 __user *)(location+1)))
				return;
			signal = math_emu_ed(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb299) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_srnm(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb29c) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_stfpc(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb29d) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_lfpc(opcode, regs);
#endif
		} else
			signal = SIGILL;
	} else {
		/*
		 * If we get an illegal op in kernel mode, send it through the
		 * kprobes notifier. If kprobes doesn't pick it up, SIGILL
		 */
		if (notify_die(DIE_BPT, "bpt", regs, 0,
			       3, SIGTRAP) != NOTIFY_STOP)
			signal = SIGILL;
	}

#ifdef CONFIG_MATHEMU
	if (signal == SIGFPE)
		do_fp_trap(regs, current->thread.fp_regs.fpc);
	else if (signal == SIGSEGV)
		do_trap(regs, signal, SEGV_MAPERR, "user address fault");
	else
#endif
	if (signal)
		do_trap(regs, signal, ILL_ILLOPC, "illegal operation");
}

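/*
 * Specification exception: with CONFIG_MATHEMU the affected floating point
 * instruction formats are emulated for user space, otherwise SIGILL is sent.
 */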
#ifdef CONFIG_MATHEMU
void specification_exception(struct pt_regs *regs)
{
	__u8 opcode[6];
	__u16 __user *location = NULL;
	int signal = 0;

	location = (__u16 __user *) get_trap_ip(regs);

	if (user_mode(regs)) {
		get_user(*((__u16 *) opcode), location);
		switch (opcode[0]) {
		case 0x28: /* LDR Rx,Ry */
			signal = math_emu_ldr(opcode);
			break;
		case 0x38: /* LER Rx,Ry */
			signal = math_emu_ler(opcode);
			break;
		case 0x60: /* STD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_std(opcode, regs);
			break;
		case 0x68: /* LD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ld(opcode, regs);
			break;
		case 0x70: /* STE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ste(opcode, regs);
			break;
		case 0x78: /* LE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_le(opcode, regs);
			break;
		default:
			signal = SIGILL;
			break;
		}
	} else
		signal = SIGILL;

	if (signal == SIGFPE)
		do_fp_trap(regs, current->thread.fp_regs.fpc);
	else if (signal)
		do_trap(regs, signal, ILL_ILLOPN, "specification exception");
}
#else
DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN,
	      "specification exception");
#endif

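/*
 * Data exception: store the current FPC register if the machine has IEEE
 * floating point (or emulate the opcode with CONFIG_MATHEMU), then raise
 * SIGFPE for an IEEE data exception and SIGILL otherwise.
 */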
void data_exception(struct pt_regs *regs)
{
	__u16 __user *location;
	int signal = 0;

	location = get_trap_ip(regs);

	if (MACHINE_HAS_IEEE)
		asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));
#ifdef CONFIG_MATHEMU
	else if (user_mode(regs)) {
		__u8 opcode[6];
		get_user(*((__u16 *) opcode), location);
		switch (opcode[0]) {
		case 0x28: /* LDR Rx,Ry */
			signal = math_emu_ldr(opcode);
			break;
		case 0x38: /* LER Rx,Ry */
			signal = math_emu_ler(opcode);
			break;
		case 0x60: /* STD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_std(opcode, regs);
			break;
		case 0x68: /* LD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ld(opcode, regs);
			break;
		case 0x70: /* STE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ste(opcode, regs);
			break;
		case 0x78: /* LE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_le(opcode, regs);
			break;
		case 0xb3:
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_b3(opcode, regs);
			break;
		case 0xed:
			get_user(*((__u32 *) (opcode+2)),
				 (__u32 __user *)(location+1));
			signal = math_emu_ed(opcode, regs);
			break;
		case 0xb2:
			if (opcode[1] == 0x99) {
				get_user(*((__u16 *) (opcode+2)), location+1);
				signal = math_emu_srnm(opcode, regs);
			} else if (opcode[1] == 0x9c) {
				get_user(*((__u16 *) (opcode+2)), location+1);
				signal = math_emu_stfpc(opcode, regs);
			} else if (opcode[1] == 0x9d) {
				get_user(*((__u16 *) (opcode+2)), location+1);
				signal = math_emu_lfpc(opcode, regs);
			} else
				signal = SIGILL;
			break;
		default:
			signal = SIGILL;
			break;
		}
	}
#endif
	if (current->thread.fp_regs.fpc & FPC_DXC_MASK)
		signal = SIGFPE;
	else
		signal = SIGILL;
	if (signal == SIGFPE)
		do_fp_trap(regs, current->thread.fp_regs.fpc);
	else if (signal)
		do_trap(regs, signal, ILL_ILLOPN, "data exception");
}

void space_switch_exception(struct pt_regs *regs)
{
	/* Set user psw back to home space mode. */
	if (user_mode(regs))
		regs->psw.mask |= PSW_ASC_HOME;
	/* Send SIGILL. */
	do_trap(regs, SIGILL, ILL_PRVOPC, "space switch event");
}

void __kprobes kernel_stack_overflow(struct pt_regs *regs)
{
	bust_spinlocks(1);
	printk("Kernel stack overflow.\n");
	show_regs(regs);
	bust_spinlocks(0);
	panic("Corrupt kernel stack, can't continue.");
}

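/* Early trap setup: enable machine check interruptions on this CPU. */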
void __init trap_init(void)
{
	local_mcck_enable();
}