/*
 *  arch/s390/kernel/traps.c
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *               Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *
 *  Derived from "arch/i386/kernel/traps.c"
 *    Copyright (C) 1991, 1992 Linus Torvalds
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/reboot.h>
#include <linux/kprobes.h>
#include <linux/bug.h>
#include <linux/utsname.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <linux/atomic.h>
#include <asm/mathemu.h>
#include <asm/cpcmd.h>
#include <asm/lowcore.h>
#include <asm/debug.h>
#include "entry.h"
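
/*
 * Program check handler table, indexed by the program interruption code
 * (0x01-0x7f). trap_init() fills every slot with default_trap_handler()
 * and then installs the specific handlers below.
 */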
void (*pgm_check_table[128])(struct pt_regs *regs);

int show_unhandled_signals = 1;

#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })
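
/*
 * Stack dump format: without CONFIG_64BIT show_stack() prints 8-digit
 * stack words and dumps 12 of them; with CONFIG_64BIT it prints
 * 16-digit words and dumps 20.
 */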
#ifndef CONFIG_64BIT
#define LONG "%08lx "
#define FOURLONG "%08lx %08lx %08lx %08lx\n"
static int kstack_depth_to_print = 12;
#else /* CONFIG_64BIT */
#define LONG "%016lx "
#define FOURLONG "%016lx %016lx %016lx %016lx\n"
static int kstack_depth_to_print = 20;
#endif /* CONFIG_64BIT */

/*
 * For show_trace we have three different stacks to consider:
 *   - the panic stack which is used if the kernel stack has overflowed
 *   - the asynchronous interrupt stack (cpu related)
 *   - the synchronous kernel stack (process related)
 * The stack trace can start at any of the three stacks and can potentially
 * touch all of them. The order is: panic stack, async stack, sync stack.
 */
static unsigned long
__show_trace(unsigned long sp, unsigned long low, unsigned long high)
{
        struct stack_frame *sf;
        struct pt_regs *regs;

        while (1) {
                sp = sp & PSW_ADDR_INSN;
                if (sp < low || sp > high - sizeof(*sf))
                        return sp;
                sf = (struct stack_frame *) sp;
                printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
                print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN);
                /* Follow the backchain. */
                while (1) {
                        low = sp;
                        sp = sf->back_chain & PSW_ADDR_INSN;
                        if (!sp)
                                break;
                        if (sp <= low || sp > high - sizeof(*sf))
                                return sp;
                        sf = (struct stack_frame *) sp;
                        printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
                        print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN);
                }
                /* Zero backchain detected, check for interrupt frame. */
                sp = (unsigned long) (sf + 1);
                if (sp <= low || sp > high - sizeof(*regs))
                        return sp;
                regs = (struct pt_regs *) sp;
                printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN);
                print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN);
                low = sp;
                sp = regs->gprs[15];
        }
}

static void show_trace(struct task_struct *task, unsigned long *stack)
{
        register unsigned long __r15 asm ("15");
        unsigned long sp;

        sp = (unsigned long) stack;
        if (!sp)
                sp = task ? task->thread.ksp : __r15;
        printk("Call Trace:\n");
#ifdef CONFIG_CHECK_STACK
        sp = __show_trace(sp, S390_lowcore.panic_stack - 4096,
                          S390_lowcore.panic_stack);
#endif
        sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE,
                          S390_lowcore.async_stack);
        if (task)
                __show_trace(sp, (unsigned long) task_stack_page(task),
                             (unsigned long) task_stack_page(task) + THREAD_SIZE);
        else
                __show_trace(sp, S390_lowcore.thread_info,
                             S390_lowcore.thread_info + THREAD_SIZE);
        if (!task)
                task = current;
        debug_show_held_locks(task);
}

void show_stack(struct task_struct *task, unsigned long *sp)
{
        register unsigned long *__r15 asm ("15");
        unsigned long *stack;
        int i;

        if (!sp)
                stack = task ? (unsigned long *) task->thread.ksp : __r15;
        else
                stack = sp;

        for (i = 0; i < kstack_depth_to_print; i++) {
                if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
                        break;
                if (i && ((i * sizeof (long) % 32) == 0))
                        printk("\n ");
                printk(LONG, *stack++);
        }
        printk("\n");
        show_trace(task, sp);
}

static void show_last_breaking_event(struct pt_regs *regs)
{
#ifdef CONFIG_64BIT
        printk("Last Breaking-Event-Address:\n");
        printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN);
        print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN);
#endif
}

/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
        printk("CPU: %d %s %s %.*s\n",
               task_thread_info(current)->cpu, print_tainted(),
               init_utsname()->release,
               (int)strcspn(init_utsname()->version, " "),
               init_utsname()->version);
        printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
               current->comm, current->pid, current,
               (void *) current->thread.ksp);
        show_stack(NULL, NULL);
}
EXPORT_SYMBOL(dump_stack);
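
/*
 * Extract the field selected by @bits from the PSW mask and shift it
 * down to bit zero (the divisor is the lowest set bit of @bits).
 */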
static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
{
        return (regs->psw.mask & bits) / ((~bits + 1) & bits);
}

void show_registers(struct pt_regs *regs)
{
        char *mode;

        mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl";
        printk("%s PSW : %p %p",
               mode, (void *) regs->psw.mask,
               (void *) regs->psw.addr);
        print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN);
        printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
               "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER),
               mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO),
               mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY),
               mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT),
               mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC),
               mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM));
#ifdef CONFIG_64BIT
        printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA));
#endif
        printk("\n%s GPRS: " FOURLONG, mode,
               regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
        printk(" " FOURLONG,
               regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
        printk(" " FOURLONG,
               regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
        printk(" " FOURLONG,
               regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);
        show_code(regs);
}

void show_regs(struct pt_regs *regs)
{
        print_modules();
        printk("CPU: %d %s %s %.*s\n",
               task_thread_info(current)->cpu, print_tainted(),
               init_utsname()->release,
               (int)strcspn(init_utsname()->version, " "),
               init_utsname()->version);
        printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
               current->comm, current->pid, current,
               (void *) current->thread.ksp);
        show_registers(regs);
        /* Show stack backtrace if pt_regs is from kernel mode */
        if (!(regs->psw.mask & PSW_MASK_PSTATE))
                show_trace(NULL, (unsigned long *) regs->gprs[15]);
        show_last_breaking_event(regs);
}

static DEFINE_SPINLOCK(die_lock);

void die(struct pt_regs *regs, const char *str)
{
        static int die_counter;

        oops_enter();
        debug_stop_all();
        console_verbose();
        spin_lock_irq(&die_lock);
        bust_spinlocks(1);
        printk("%s: %04x [#%d] ", str, regs->int_code & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
        printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
        printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
        printk("DEBUG_PAGEALLOC");
#endif
        printk("\n");
        notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV);
        show_regs(regs);
        bust_spinlocks(0);
        add_taint(TAINT_DIE);
        spin_unlock_irq(&die_lock);
        if (in_interrupt())
                panic("Fatal exception in interrupt");
        if (panic_on_oops)
                panic("Fatal exception: panic_on_oops");
        oops_exit();
        do_exit(SIGSEGV);
}

static inline void report_user_fault(struct pt_regs *regs, int signr)
{
        if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
                return;
        if (!unhandled_signal(current, signr))
                return;
        if (!printk_ratelimit())
                return;
        printk("User process fault: interruption code 0x%X ", regs->int_code);
        print_vma_addr("in ", regs->psw.addr & PSW_ADDR_INSN);
        printk("\n");
        show_regs(regs);
}

int is_valid_bugaddr(unsigned long addr)
{
        return 1;
}
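
/*
 * Return the address of the instruction that caused the program check.
 * The PSW address already points past the faulting instruction; its
 * length is kept in the upper halfword of int_code.
 */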
static inline void __user *get_psw_address(struct pt_regs *regs)
{
        return (void __user *)
                ((regs->psw.addr - (regs->int_code >> 16)) & PSW_ADDR_INSN);
}
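
/*
 * Common trap handling: user-mode faults get a signal and are reported
 * via report_user_fault(); kernel-mode faults try an exception table
 * fixup first, then report_bug(), and finally die().
 */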
static void __kprobes do_trap(struct pt_regs *regs,
                              int si_signo, int si_code, char *str)
{
        siginfo_t info;

        if (notify_die(DIE_TRAP, str, regs, 0,
                       regs->int_code, si_signo) == NOTIFY_STOP)
                return;

        if (regs->psw.mask & PSW_MASK_PSTATE) {
                info.si_signo = si_signo;
                info.si_errno = 0;
                info.si_code = si_code;
                info.si_addr = get_psw_address(regs);
                force_sig_info(si_signo, &info, current);
                report_user_fault(regs, si_signo);
        } else {
                const struct exception_table_entry *fixup;
                fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
                if (fixup)
                        regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
                else {
                        enum bug_trap_type btt;

                        btt = report_bug(regs->psw.addr & PSW_ADDR_INSN, regs);
                        if (btt == BUG_TRAP_TYPE_WARN)
                                return;
                        die(regs, str);
                }
        }
}
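
/*
 * PER (program event recording) trap: deliver SIGTRAP with the PER
 * event address to the current task if it is being ptraced, otherwise
 * ignore the event.
 */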
void __kprobes do_per_trap(struct pt_regs *regs)
{
        siginfo_t info;

        if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, SIGTRAP) == NOTIFY_STOP)
                return;
        if (!current->ptrace)
                return;
        info.si_signo = SIGTRAP;
        info.si_errno = 0;
        info.si_code = TRAP_HWBKPT;
        info.si_addr =
                (void __force __user *) current->thread.per_event.address;
        force_sig_info(SIGTRAP, &info, current);
}

static void default_trap_handler(struct pt_regs *regs)
{
        if (regs->psw.mask & PSW_MASK_PSTATE) {
                report_user_fault(regs, SIGSEGV);
                do_exit(SIGSEGV);
        } else
                die(regs, "Unknown program exception");
}

#define DO_ERROR_INFO(name, signr, sicode, str) \
static void name(struct pt_regs *regs) \
{ \
        do_trap(regs, signr, sicode, str); \
}

DO_ERROR_INFO(addressing_exception, SIGILL, ILL_ILLADR,
              "addressing exception")
DO_ERROR_INFO(execute_exception, SIGILL, ILL_ILLOPN,
              "execute exception")
DO_ERROR_INFO(divide_exception, SIGFPE, FPE_INTDIV,
              "fixpoint divide exception")
DO_ERROR_INFO(overflow_exception, SIGFPE, FPE_INTOVF,
              "fixpoint overflow exception")
DO_ERROR_INFO(hfp_overflow_exception, SIGFPE, FPE_FLTOVF,
              "HFP overflow exception")
DO_ERROR_INFO(hfp_underflow_exception, SIGFPE, FPE_FLTUND,
              "HFP underflow exception")
DO_ERROR_INFO(hfp_significance_exception, SIGFPE, FPE_FLTRES,
              "HFP significance exception")
DO_ERROR_INFO(hfp_divide_exception, SIGFPE, FPE_FLTDIV,
              "HFP divide exception")
DO_ERROR_INFO(hfp_sqrt_exception, SIGFPE, FPE_FLTINV,
              "HFP square root exception")
DO_ERROR_INFO(operand_exception, SIGILL, ILL_ILLOPN,
              "operand exception")
DO_ERROR_INFO(privileged_op, SIGILL, ILL_PRVOPC,
              "privileged operation")
DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN,
              "special operation exception")
DO_ERROR_INFO(translation_exception, SIGILL, ILL_ILLOPN,
              "translation exception")

static inline void do_fp_trap(struct pt_regs *regs, int fpc)
{
        int si_code = 0;

        /* FPC[2] is Data Exception Code */
        if ((fpc & 0x00000300) == 0) {
                /* bits 6 and 7 of DXC are 0 iff IEEE exception */
                if (fpc & 0x8000) /* invalid fp operation */
                        si_code = FPE_FLTINV;
                else if (fpc & 0x4000) /* div by 0 */
                        si_code = FPE_FLTDIV;
                else if (fpc & 0x2000) /* overflow */
                        si_code = FPE_FLTOVF;
                else if (fpc & 0x1000) /* underflow */
                        si_code = FPE_FLTUND;
                else if (fpc & 0x0800) /* inexact */
                        si_code = FPE_FLTRES;
        }
        do_trap(regs, SIGFPE, si_code, "floating point exception");
}
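
/*
 * Illegal-operation program check. In user mode this recognizes the
 * ptrace breakpoint opcode (SIGTRAP for traced tasks) and, with
 * CONFIG_MATHEMU, emulates the floating point instructions handled by
 * math_emu_*(); everything else ends in SIGILL. In kernel mode the
 * event is handed to the kprobes breakpoint notifier first.
 */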
static void __kprobes illegal_op(struct pt_regs *regs)
{
        siginfo_t info;
        __u8 opcode[6];
        __u16 __user *location;
        int signal = 0;

        location = get_psw_address(regs);

        if (regs->psw.mask & PSW_MASK_PSTATE) {
                if (get_user(*((__u16 *) opcode), (__u16 __user *) location))
                        return;
                if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) {
                        if (current->ptrace) {
                                info.si_signo = SIGTRAP;
                                info.si_errno = 0;
                                info.si_code = TRAP_BRKPT;
                                info.si_addr = location;
                                force_sig_info(SIGTRAP, &info, current);
                        } else
                                signal = SIGILL;
#ifdef CONFIG_MATHEMU
                } else if (opcode[0] == 0xb3) {
                        if (get_user(*((__u16 *) (opcode+2)), location+1))
                                return;
                        signal = math_emu_b3(opcode, regs);
                } else if (opcode[0] == 0xed) {
                        if (get_user(*((__u32 *) (opcode+2)),
                                     (__u32 __user *)(location+1)))
                                return;
                        signal = math_emu_ed(opcode, regs);
                } else if (*((__u16 *) opcode) == 0xb299) {
                        if (get_user(*((__u16 *) (opcode+2)), location+1))
                                return;
                        signal = math_emu_srnm(opcode, regs);
                } else if (*((__u16 *) opcode) == 0xb29c) {
                        if (get_user(*((__u16 *) (opcode+2)), location+1))
                                return;
                        signal = math_emu_stfpc(opcode, regs);
                } else if (*((__u16 *) opcode) == 0xb29d) {
                        if (get_user(*((__u16 *) (opcode+2)), location+1))
                                return;
                        signal = math_emu_lfpc(opcode, regs);
#endif
                } else
                        signal = SIGILL;
        } else {
                /*
                 * If we get an illegal op in kernel mode, send it through the
                 * kprobes notifier. If kprobes doesn't pick it up, SIGILL
                 */
                if (notify_die(DIE_BPT, "bpt", regs, 0,
                               3, SIGTRAP) != NOTIFY_STOP)
                        signal = SIGILL;
        }

#ifdef CONFIG_MATHEMU
        if (signal == SIGFPE)
                do_fp_trap(regs, current->thread.fp_regs.fpc);
        else if (signal == SIGSEGV)
                do_trap(regs, signal, SEGV_MAPERR, "user address fault");
        else
#endif
        if (signal)
                do_trap(regs, signal, ILL_ILLOPC, "illegal operation");
}

#ifdef CONFIG_MATHEMU
void specification_exception(struct pt_regs *regs)
{
        __u8 opcode[6];
        __u16 __user *location = NULL;
        int signal = 0;

        location = (__u16 __user *) get_psw_address(regs);

        if (regs->psw.mask & PSW_MASK_PSTATE) {
                get_user(*((__u16 *) opcode), location);
                switch (opcode[0]) {
                case 0x28: /* LDR Rx,Ry */
                        signal = math_emu_ldr(opcode);
                        break;
                case 0x38: /* LER Rx,Ry */
                        signal = math_emu_ler(opcode);
                        break;
                case 0x60: /* STD R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_std(opcode, regs);
                        break;
                case 0x68: /* LD R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_ld(opcode, regs);
                        break;
                case 0x70: /* STE R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_ste(opcode, regs);
                        break;
                case 0x78: /* LE R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_le(opcode, regs);
                        break;
                default:
                        signal = SIGILL;
                        break;
                }
        } else
                signal = SIGILL;

        if (signal == SIGFPE)
                do_fp_trap(regs, current->thread.fp_regs.fpc);
        else if (signal)
                do_trap(regs, signal, ILL_ILLOPN, "specification exception");
}
#else
DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN,
              "specification exception");
#endif
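
/*
 * Data exception. If the machine has IEEE floating point the current
 * FPC is stored so the data exception code (DXC) can be inspected;
 * with CONFIG_MATHEMU user-space floating point instructions may be
 * emulated. A set DXC results in SIGFPE, everything else in SIGILL.
 */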
static void data_exception(struct pt_regs *regs)
{
        __u16 __user *location;
        int signal = 0;

        location = get_psw_address(regs);

        if (MACHINE_HAS_IEEE)
                asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));
#ifdef CONFIG_MATHEMU
        else if (regs->psw.mask & PSW_MASK_PSTATE) {
                __u8 opcode[6];
                get_user(*((__u16 *) opcode), location);
                switch (opcode[0]) {
                case 0x28: /* LDR Rx,Ry */
                        signal = math_emu_ldr(opcode);
                        break;
                case 0x38: /* LER Rx,Ry */
                        signal = math_emu_ler(opcode);
                        break;
                case 0x60: /* STD R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_std(opcode, regs);
                        break;
                case 0x68: /* LD R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_ld(opcode, regs);
                        break;
                case 0x70: /* STE R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_ste(opcode, regs);
                        break;
                case 0x78: /* LE R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_le(opcode, regs);
                        break;
                case 0xb3:
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_b3(opcode, regs);
                        break;
                case 0xed:
                        get_user(*((__u32 *) (opcode+2)),
                                 (__u32 __user *)(location+1));
                        signal = math_emu_ed(opcode, regs);
                        break;
                case 0xb2:
                        if (opcode[1] == 0x99) {
                                get_user(*((__u16 *) (opcode+2)), location+1);
                                signal = math_emu_srnm(opcode, regs);
                        } else if (opcode[1] == 0x9c) {
                                get_user(*((__u16 *) (opcode+2)), location+1);
                                signal = math_emu_stfpc(opcode, regs);
                        } else if (opcode[1] == 0x9d) {
                                get_user(*((__u16 *) (opcode+2)), location+1);
                                signal = math_emu_lfpc(opcode, regs);
                        } else
                                signal = SIGILL;
                        break;
                default:
                        signal = SIGILL;
                        break;
                }
        }
#endif
        if (current->thread.fp_regs.fpc & FPC_DXC_MASK)
                signal = SIGFPE;
        else
                signal = SIGILL;
        if (signal == SIGFPE)
                do_fp_trap(regs, current->thread.fp_regs.fpc);
        else if (signal)
                do_trap(regs, signal, ILL_ILLOPN, "data exception");
}

static void space_switch_exception(struct pt_regs *regs)
{
        /* Set user psw back to home space mode. */
        if (regs->psw.mask & PSW_MASK_PSTATE)
                regs->psw.mask |= PSW_ASC_HOME;
        /* Send SIGILL. */
        do_trap(regs, SIGILL, ILL_PRVOPC, "space switch event");
}

void __kprobes kernel_stack_overflow(struct pt_regs * regs)
{
        bust_spinlocks(1);
        printk("Kernel stack overflow.\n");
        show_regs(regs);
        bust_spinlocks(0);
        panic("Corrupt kernel stack, can't continue.");
}

/* init is done in lowcore.S and head.S */

void __init trap_init(void)
{
        int i;

        for (i = 0; i < 128; i++)
                pgm_check_table[i] = &default_trap_handler;
        pgm_check_table[1] = &illegal_op;
        pgm_check_table[2] = &privileged_op;
        pgm_check_table[3] = &execute_exception;
        pgm_check_table[4] = &do_protection_exception;
        pgm_check_table[5] = &addressing_exception;
        pgm_check_table[6] = &specification_exception;
        pgm_check_table[7] = &data_exception;
        pgm_check_table[8] = &overflow_exception;
        pgm_check_table[9] = &divide_exception;
        pgm_check_table[0x0A] = &overflow_exception;
        pgm_check_table[0x0B] = &divide_exception;
        pgm_check_table[0x0C] = &hfp_overflow_exception;
        pgm_check_table[0x0D] = &hfp_underflow_exception;
        pgm_check_table[0x0E] = &hfp_significance_exception;
        pgm_check_table[0x0F] = &hfp_divide_exception;
        pgm_check_table[0x10] = &do_dat_exception;
        pgm_check_table[0x11] = &do_dat_exception;
        pgm_check_table[0x12] = &translation_exception;
        pgm_check_table[0x13] = &special_op_exception;
#ifdef CONFIG_64BIT
        pgm_check_table[0x38] = &do_asce_exception;
        pgm_check_table[0x39] = &do_dat_exception;
        pgm_check_table[0x3A] = &do_dat_exception;
        pgm_check_table[0x3B] = &do_dat_exception;
#endif /* CONFIG_64BIT */
        pgm_check_table[0x15] = &operand_exception;
        pgm_check_table[0x1C] = &space_switch_exception;
        pgm_check_table[0x1D] = &hfp_sqrt_exception;
        /* Enable machine checks early. */
        local_mcck_enable();
}