/* arch/s390/kernel/traps.c */
  1. /*
  2. * S390 version
  3. * Copyright IBM Corp. 1999, 2000
  4. * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
  5. * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
  6. *
  7. * Derived from "arch/i386/kernel/traps.c"
  8. * Copyright (C) 1991, 1992 Linus Torvalds
  9. */
  10. /*
  11. * 'Traps.c' handles hardware traps and faults after we have saved some
  12. * state in 'asm.s'.
  13. */
  14. #include <linux/sched.h>
  15. #include <linux/kernel.h>
  16. #include <linux/string.h>
  17. #include <linux/errno.h>
  18. #include <linux/ptrace.h>
  19. #include <linux/timer.h>
  20. #include <linux/mm.h>
  21. #include <linux/smp.h>
  22. #include <linux/init.h>
  23. #include <linux/interrupt.h>
  24. #include <linux/seq_file.h>
  25. #include <linux/delay.h>
  26. #include <linux/module.h>
  27. #include <linux/kdebug.h>
  28. #include <linux/kallsyms.h>
  29. #include <linux/reboot.h>
  30. #include <linux/kprobes.h>
  31. #include <linux/bug.h>
  32. #include <linux/utsname.h>
  33. #include <asm/uaccess.h>
  34. #include <asm/io.h>
  35. #include <linux/atomic.h>
  36. #include <asm/mathemu.h>
  37. #include <asm/cpcmd.h>
  38. #include <asm/lowcore.h>
  39. #include <asm/debug.h>
  40. #include <asm/ipl.h>
  41. #include "entry.h"
/* Program-check dispatch table, indexed by interruption code (0..127);
 * populated in trap_init() below. */
void (*pgm_check_table[128])(struct pt_regs *regs);

/* Non-zero: log unhandled user-space faults (see report_user_fault()). */
int show_unhandled_signals = 1;

/* Current kernel stack pointer, read straight from %r15. */
#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })

#ifndef CONFIG_64BIT
#define LONG "%08lx "		/* 31 bit: 8 hex digits per word */
#define FOURLONG "%08lx %08lx %08lx %08lx\n"
static int kstack_depth_to_print = 12;
#else /* CONFIG_64BIT */
#define LONG "%016lx "		/* 64 bit: 16 hex digits per word */
#define FOURLONG "%016lx %016lx %016lx %016lx\n"
static int kstack_depth_to_print = 20;
#endif /* CONFIG_64BIT */
  54. /*
  55. * For show_trace we have tree different stack to consider:
  56. * - the panic stack which is used if the kernel stack has overflown
  57. * - the asynchronous interrupt stack (cpu related)
  58. * - the synchronous kernel stack (process related)
  59. * The stack trace can start at any of the three stack and can potentially
  60. * touch all of them. The order is: panic stack, async stack, sync stack.
  61. */
/*
 * Walk one stack area [low, high) starting at @sp and print every
 * return address found (saved %r14 in gprs[8] of each stack frame).
 * Returns the stack pointer at which the walk left the area so the
 * caller can continue the trace on the next stack.
 *
 * NOTE(review): the "%016lx" formats here are hard-coded for 64 bit
 * while the LONG/FOURLONG macros above switch to "%08lx" for 31 bit -
 * confirm whether the wide format is intentional here.
 */
static unsigned long
__show_trace(unsigned long sp, unsigned long low, unsigned long high)
{
	struct stack_frame *sf;
	struct pt_regs *regs;

	while (1) {
		sp = sp & PSW_ADDR_INSN;
		/* Stop as soon as a whole frame no longer fits in the area. */
		if (sp < low || sp > high - sizeof(*sf))
			return sp;
		sf = (struct stack_frame *) sp;
		printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
		print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN);
		/* Follow the backchain. */
		while (1) {
			low = sp;
			sp = sf->back_chain & PSW_ADDR_INSN;
			if (!sp)
				break;
			/* Backchain must move strictly upward and stay in range. */
			if (sp <= low || sp > high - sizeof(*sf))
				return sp;
			sf = (struct stack_frame *) sp;
			printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
			print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN);
		}
		/* Zero backchain detected, check for interrupt frame. */
		sp = (unsigned long) (sf + 1);
		if (sp <= low || sp > high - sizeof(*regs))
			return sp;
		regs = (struct pt_regs *) sp;
		printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN);
		print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN);
		/* Resume the walk at the interrupted context's %r15. */
		low = sp;
		sp = regs->gprs[15];
	}
}
/*
 * Print a call trace starting at @stack (or, if NULL, at the task's
 * saved ksp / our own %r15).  Per the comment above __show_trace() the
 * walk visits the panic stack, the async stack and finally the
 * synchronous kernel stack, in that order.
 */
static void show_trace(struct task_struct *task, unsigned long *stack)
{
	register unsigned long __r15 asm ("15");
	unsigned long sp;

	sp = (unsigned long) stack;
	if (!sp)
		/* No explicit start: task's saved ksp, else current %r15. */
		sp = task ? task->thread.ksp : __r15;
	printk("Call Trace:\n");
#ifdef CONFIG_CHECK_STACK
	sp = __show_trace(sp, S390_lowcore.panic_stack - 4096,
			  S390_lowcore.panic_stack);
#endif
	sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE,
			  S390_lowcore.async_stack);
	if (task)
		__show_trace(sp, (unsigned long) task_stack_page(task),
			     (unsigned long) task_stack_page(task) + THREAD_SIZE);
	else
		__show_trace(sp, S390_lowcore.thread_info,
			     S390_lowcore.thread_info + THREAD_SIZE);
	if (!task)
		task = current;
	debug_show_held_locks(task);
}
  121. void show_stack(struct task_struct *task, unsigned long *sp)
  122. {
  123. register unsigned long * __r15 asm ("15");
  124. unsigned long *stack;
  125. int i;
  126. if (!sp)
  127. stack = task ? (unsigned long *) task->thread.ksp : __r15;
  128. else
  129. stack = sp;
  130. for (i = 0; i < kstack_depth_to_print; i++) {
  131. if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
  132. break;
  133. if ((i * sizeof(long) % 32) == 0)
  134. printk("%s ", i == 0 ? "" : "\n");
  135. printk(LONG, *stack++);
  136. }
  137. printk("\n");
  138. show_trace(task, sp);
  139. }
/*
 * Print the last breaking-event address (64 bit only).
 * NOTE(review): the value is read from regs->args[0]; presumably the
 * low-level entry code stores the breaking-event address there -
 * confirm against the entry code.
 */
static void show_last_breaking_event(struct pt_regs *regs)
{
#ifdef CONFIG_64BIT
	printk("Last Breaking-Event-Address:\n");
	printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN);
	print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN);
#endif
}
  148. /*
  149. * The architecture-independent dump_stack generator
  150. */
/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
	/* Banner: cpu, taint flags, release and first word of version. */
	printk("CPU: %d %s %s %.*s\n",
	       task_thread_info(current)->cpu, print_tainted(),
	       init_utsname()->release,
	       (int)strcspn(init_utsname()->version, " "),
	       init_utsname()->version);
	printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
	       current->comm, current->pid, current,
	       (void *) current->thread.ksp);
	show_stack(NULL, NULL);
}
EXPORT_SYMBOL(dump_stack);
  164. static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
  165. {
  166. return (regs->psw.mask & bits) / ((~bits + 1) & bits);
  167. }
/*
 * Print the PSW (raw and decoded bit by bit), all 16 general purpose
 * registers and the code around the PSW address for the given regs.
 */
void show_registers(struct pt_regs *regs)
{
	char *mode;

	mode = user_mode(regs) ? "User" : "Krnl";
	printk("%s PSW : %p %p",
	       mode, (void *) regs->psw.mask,
	       (void *) regs->psw.addr);
	print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN);
	/* Decode the individual PSW mask fields. */
	printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
	       "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER),
	       mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO),
	       mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY),
	       mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT),
	       mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC),
	       mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM));
#ifdef CONFIG_64BIT
	printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA));
#endif
	printk("\n%s GPRS: " FOURLONG, mode,
	       regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
	printk(" " FOURLONG,
	       regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
	printk(" " FOURLONG,
	       regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
	printk(" " FOURLONG,
	       regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);
	show_code(regs);
}
/*
 * Full register dump for oopses: modules, banner, registers, and - for
 * kernel-mode regs - a stack backtrace plus the last breaking event.
 */
void show_regs(struct pt_regs *regs)
{
	print_modules();
	printk("CPU: %d %s %s %.*s\n",
	       task_thread_info(current)->cpu, print_tainted(),
	       init_utsname()->release,
	       (int)strcspn(init_utsname()->version, " "),
	       init_utsname()->version);
	printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
	       current->comm, current->pid, current,
	       (void *) current->thread.ksp);
	show_registers(regs);
	/* Show stack backtrace if pt_regs is from kernel mode */
	if (!user_mode(regs))
		show_trace(NULL, (unsigned long *) regs->gprs[15]);
	show_last_breaking_event(regs);
}
/* Serializes concurrent oopses so their output does not interleave. */
static DEFINE_SPINLOCK(die_lock);

/*
 * Terminal oops path: log diagnostics and kill the current task (or
 * panic when oopsing in interrupt context or panic_on_oops is set).
 * Never returns.  The call order up to spin_lock_irq() matters:
 * oops/debug bookkeeping runs before the console is forced verbose
 * and the die lock is taken.
 */
void die(struct pt_regs *regs, const char *str)
{
	static int die_counter;

	oops_enter();
	lgr_info_log();
	debug_stop_all();
	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);
	printk("%s: %04x [#%d] ", str, regs->int_code & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("DEBUG_PAGEALLOC");
#endif
	printk("\n");
	notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV);
	show_regs(regs);
	bust_spinlocks(0);
	add_taint(TAINT_DIE);
	spin_unlock_irq(&die_lock);
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception: panic_on_oops");
	oops_exit();
	do_exit(SIGSEGV);
}
  246. static inline void report_user_fault(struct pt_regs *regs, int signr)
  247. {
  248. if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
  249. return;
  250. if (!unhandled_signal(current, signr))
  251. return;
  252. if (!printk_ratelimit())
  253. return;
  254. printk("User process fault: interruption code 0x%X ", regs->int_code);
  255. print_vma_addr("in ", regs->psw.addr & PSW_ADDR_INSN);
  256. printk("\n");
  257. show_regs(regs);
  258. }
/*
 * BUG() support: every address is considered a valid bug address here,
 * so report_bug() always gets a chance to handle the trap.
 */
int is_valid_bugaddr(unsigned long addr)
{
	return 1;
}
  263. static inline void __user *get_psw_address(struct pt_regs *regs)
  264. {
  265. return (void __user *)
  266. ((regs->psw.addr - (regs->int_code >> 16)) & PSW_ADDR_INSN);
  267. }
/*
 * Common trap delivery.  For user mode, send @si_signo with the
 * faulting address and log the fault.  For kernel mode, first try an
 * exception-table fixup, then report_bug() (a WARN just returns), and
 * finally die().
 */
static void __kprobes do_trap(struct pt_regs *regs,
			      int si_signo, int si_code, char *str)
{
	siginfo_t info;

	/* Give debuggers/notifier chains a chance to consume the trap. */
	if (notify_die(DIE_TRAP, str, regs, 0,
		       regs->int_code, si_signo) == NOTIFY_STOP)
		return;
	if (user_mode(regs)) {
		info.si_signo = si_signo;
		info.si_errno = 0;
		info.si_code = si_code;
		info.si_addr = get_psw_address(regs);
		force_sig_info(si_signo, &info, current);
		report_user_fault(regs, si_signo);
	} else {
		const struct exception_table_entry *fixup;
		fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
		if (fixup)
			/* Resume at the fixup handler instead of oopsing. */
			regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
		else {
			enum bug_trap_type btt;
			btt = report_bug(regs->psw.addr & PSW_ADDR_INSN, regs);
			if (btt == BUG_TRAP_TYPE_WARN)
				return;
			die(regs, str);
		}
	}
}
  296. void __kprobes do_per_trap(struct pt_regs *regs)
  297. {
  298. siginfo_t info;
  299. if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, SIGTRAP) == NOTIFY_STOP)
  300. return;
  301. if (!current->ptrace)
  302. return;
  303. info.si_signo = SIGTRAP;
  304. info.si_errno = 0;
  305. info.si_code = TRAP_HWBKPT;
  306. info.si_addr =
  307. (void __force __user *) current->thread.per_event.address;
  308. force_sig_info(SIGTRAP, &info, current);
  309. }
  310. static void default_trap_handler(struct pt_regs *regs)
  311. {
  312. if (user_mode(regs)) {
  313. report_user_fault(regs, SIGSEGV);
  314. do_exit(SIGSEGV);
  315. } else
  316. die(regs, "Unknown program exception");
  317. }
/*
 * Generate a trivial program-check handler that forwards to do_trap()
 * with a fixed signal number, si_code and message string.
 */
#define DO_ERROR_INFO(name, signr, sicode, str) \
static void name(struct pt_regs *regs) \
{ \
	do_trap(regs, signr, sicode, str); \
}

DO_ERROR_INFO(addressing_exception, SIGILL, ILL_ILLADR,
	      "addressing exception")
DO_ERROR_INFO(execute_exception, SIGILL, ILL_ILLOPN,
	      "execute exception")
DO_ERROR_INFO(divide_exception, SIGFPE, FPE_INTDIV,
	      "fixpoint divide exception")
DO_ERROR_INFO(overflow_exception, SIGFPE, FPE_INTOVF,
	      "fixpoint overflow exception")
DO_ERROR_INFO(hfp_overflow_exception, SIGFPE, FPE_FLTOVF,
	      "HFP overflow exception")
DO_ERROR_INFO(hfp_underflow_exception, SIGFPE, FPE_FLTUND,
	      "HFP underflow exception")
DO_ERROR_INFO(hfp_significance_exception, SIGFPE, FPE_FLTRES,
	      "HFP significance exception")
DO_ERROR_INFO(hfp_divide_exception, SIGFPE, FPE_FLTDIV,
	      "HFP divide exception")
DO_ERROR_INFO(hfp_sqrt_exception, SIGFPE, FPE_FLTINV,
	      "HFP square root exception")
DO_ERROR_INFO(operand_exception, SIGILL, ILL_ILLOPN,
	      "operand exception")
DO_ERROR_INFO(privileged_op, SIGILL, ILL_PRVOPC,
	      "privileged operation")
DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN,
	      "special operation exception")
DO_ERROR_INFO(translation_exception, SIGILL, ILL_ILLOPN,
	      "translation exception")
  349. static inline void do_fp_trap(struct pt_regs *regs, int fpc)
  350. {
  351. int si_code = 0;
  352. /* FPC[2] is Data Exception Code */
  353. if ((fpc & 0x00000300) == 0) {
  354. /* bits 6 and 7 of DXC are 0 iff IEEE exception */
  355. if (fpc & 0x8000) /* invalid fp operation */
  356. si_code = FPE_FLTINV;
  357. else if (fpc & 0x4000) /* div by 0 */
  358. si_code = FPE_FLTDIV;
  359. else if (fpc & 0x2000) /* overflow */
  360. si_code = FPE_FLTOVF;
  361. else if (fpc & 0x1000) /* underflow */
  362. si_code = FPE_FLTUND;
  363. else if (fpc & 0x0800) /* inexact */
  364. si_code = FPE_FLTRES;
  365. }
  366. do_trap(regs, SIGFPE, si_code, "floating point exception");
  367. }
/*
 * Operation exception (illegal opcode).  In user mode: recognize the
 * software breakpoint opcode (SIGTRAP for ptraced tasks), optionally
 * emulate floating point instructions (CONFIG_MATHEMU), otherwise
 * SIGILL.  In kernel mode: offer the trap to the kprobes notifier
 * first, SIGILL if unclaimed.
 */
static void __kprobes illegal_op(struct pt_regs *regs)
{
	siginfo_t info;
	__u8 opcode[6];
	__u16 __user *location;
	int signal = 0;

	location = get_psw_address(regs);
	if (user_mode(regs)) {
		/* Fetch the first halfword of the faulting instruction. */
		if (get_user(*((__u16 *) opcode), (__u16 __user *) location))
			return;
		if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) {
			if (current->ptrace) {
				info.si_signo = SIGTRAP;
				info.si_errno = 0;
				info.si_code = TRAP_BRKPT;
				info.si_addr = location;
				force_sig_info(SIGTRAP, &info, current);
			} else
				signal = SIGILL;
#ifdef CONFIG_MATHEMU
		} else if (opcode[0] == 0xb3) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_b3(opcode, regs);
		} else if (opcode[0] == 0xed) {
			if (get_user(*((__u32 *) (opcode+2)),
				     (__u32 __user *)(location+1)))
				return;
			signal = math_emu_ed(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb299) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_srnm(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb29c) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_stfpc(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb29d) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_lfpc(opcode, regs);
#endif
		} else
			signal = SIGILL;
	} else {
		/*
		 * If we get an illegal op in kernel mode, send it through the
		 * kprobes notifier. If kprobes doesn't pick it up, SIGILL
		 */
		if (notify_die(DIE_BPT, "bpt", regs, 0,
			       3, SIGTRAP) != NOTIFY_STOP)
			signal = SIGILL;
	}
#ifdef CONFIG_MATHEMU
	/* The emulator may request SIGFPE/SIGSEGV with different si_codes. */
	if (signal == SIGFPE)
		do_fp_trap(regs, current->thread.fp_regs.fpc);
	else if (signal == SIGSEGV)
		do_trap(regs, signal, SEGV_MAPERR, "user address fault");
	else
#endif
	if (signal)
		do_trap(regs, signal, ILL_ILLOPC, "illegal operation");
}
#ifdef CONFIG_MATHEMU
/*
 * Specification exception with math emulation: decode the faulting
 * instruction and emulate the HFP load/store forms; anything else is
 * SIGILL.  The emulator's return value selects the signal delivered.
 *
 * NOTE(review): the get_user() return values below are ignored; on a
 * fault the opcode bytes stay uninitialized - consider checking them
 * as illegal_op() does.
 */
void specification_exception(struct pt_regs *regs)
{
	__u8 opcode[6];
	__u16 __user *location = NULL;
	int signal = 0;

	location = (__u16 __user *) get_psw_address(regs);
	if (user_mode(regs)) {
		get_user(*((__u16 *) opcode), location);
		switch (opcode[0]) {
		case 0x28: /* LDR Rx,Ry */
			signal = math_emu_ldr(opcode);
			break;
		case 0x38: /* LER Rx,Ry */
			signal = math_emu_ler(opcode);
			break;
		case 0x60: /* STD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_std(opcode, regs);
			break;
		case 0x68: /* LD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ld(opcode, regs);
			break;
		case 0x70: /* STE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ste(opcode, regs);
			break;
		case 0x78: /* LE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_le(opcode, regs);
			break;
		default:
			signal = SIGILL;
			break;
		}
	} else
		signal = SIGILL;
	if (signal == SIGFPE)
		do_fp_trap(regs, current->thread.fp_regs.fpc);
	else if (signal)
		do_trap(regs, signal, ILL_ILLOPN, "specification exception");
}
#else
/* Without math emulation a specification exception is plain SIGILL. */
DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN,
	      "specification exception");
#endif
/*
 * Data exception.  With IEEE hardware, capture the FPC so its DXC can
 * be decoded; without it (CONFIG_MATHEMU, user mode) emulate the
 * faulting floating point instruction.
 *
 * NOTE(review): the DXC check below unconditionally overwrites the
 * @signal value the MATHEMU emulation path computed - verify this is
 * the intended precedence (on non-IEEE machines fp_regs.fpc holds the
 * emulator's FPC state).
 */
static void data_exception(struct pt_regs *regs)
{
	__u16 __user *location;
	int signal = 0;

	location = get_psw_address(regs);
	if (MACHINE_HAS_IEEE)
		/* Store the real FPC, including the DXC, for decoding. */
		asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));
#ifdef CONFIG_MATHEMU
	else if (user_mode(regs)) {
		__u8 opcode[6];
		/* NOTE(review): get_user() results ignored on this path. */
		get_user(*((__u16 *) opcode), location);
		switch (opcode[0]) {
		case 0x28: /* LDR Rx,Ry */
			signal = math_emu_ldr(opcode);
			break;
		case 0x38: /* LER Rx,Ry */
			signal = math_emu_ler(opcode);
			break;
		case 0x60: /* STD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_std(opcode, regs);
			break;
		case 0x68: /* LD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ld(opcode, regs);
			break;
		case 0x70: /* STE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ste(opcode, regs);
			break;
		case 0x78: /* LE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_le(opcode, regs);
			break;
		case 0xb3:
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_b3(opcode, regs);
			break;
		case 0xed:
			get_user(*((__u32 *) (opcode+2)),
				 (__u32 __user *)(location+1));
			signal = math_emu_ed(opcode, regs);
			break;
		case 0xb2:
			if (opcode[1] == 0x99) {
				get_user(*((__u16 *) (opcode+2)), location+1);
				signal = math_emu_srnm(opcode, regs);
			} else if (opcode[1] == 0x9c) {
				get_user(*((__u16 *) (opcode+2)), location+1);
				signal = math_emu_stfpc(opcode, regs);
			} else if (opcode[1] == 0x9d) {
				get_user(*((__u16 *) (opcode+2)), location+1);
				signal = math_emu_lfpc(opcode, regs);
			} else
				signal = SIGILL;
			break;
		default:
			signal = SIGILL;
			break;
		}
	}
#endif
	/* A set DXC means a floating point condition, otherwise SIGILL. */
	if (current->thread.fp_regs.fpc & FPC_DXC_MASK)
		signal = SIGFPE;
	else
		signal = SIGILL;
	if (signal == SIGFPE)
		do_fp_trap(regs, current->thread.fp_regs.fpc);
	else if (signal)
		do_trap(regs, signal, ILL_ILLOPN, "data exception");
}
/*
 * Space-switch event: put the user PSW back into home-space mode and
 * deliver SIGILL.
 */
static void space_switch_exception(struct pt_regs *regs)
{
	/* Set user psw back to home space mode. */
	if (user_mode(regs))
		regs->psw.mask |= PSW_ASC_HOME;
	/* Send SIGILL. */
	do_trap(regs, SIGILL, ILL_PRVOPC, "space switch event");
}
/*
 * Called from the low-level entry code when the kernel stack check
 * fails: dump state and panic, the stack cannot be trusted any more.
 */
void __kprobes kernel_stack_overflow(struct pt_regs * regs)
{
	bust_spinlocks(1);
	printk("Kernel stack overflow.\n");
	show_regs(regs);
	bust_spinlocks(0);
	panic("Corrupt kernel stack, can't continue.");
}
  565. /* init is done in lowcore.S and head.S */
  566. void __init trap_init(void)
  567. {
  568. int i;
  569. for (i = 0; i < 128; i++)
  570. pgm_check_table[i] = &default_trap_handler;
  571. pgm_check_table[1] = &illegal_op;
  572. pgm_check_table[2] = &privileged_op;
  573. pgm_check_table[3] = &execute_exception;
  574. pgm_check_table[4] = &do_protection_exception;
  575. pgm_check_table[5] = &addressing_exception;
  576. pgm_check_table[6] = &specification_exception;
  577. pgm_check_table[7] = &data_exception;
  578. pgm_check_table[8] = &overflow_exception;
  579. pgm_check_table[9] = &divide_exception;
  580. pgm_check_table[0x0A] = &overflow_exception;
  581. pgm_check_table[0x0B] = &divide_exception;
  582. pgm_check_table[0x0C] = &hfp_overflow_exception;
  583. pgm_check_table[0x0D] = &hfp_underflow_exception;
  584. pgm_check_table[0x0E] = &hfp_significance_exception;
  585. pgm_check_table[0x0F] = &hfp_divide_exception;
  586. pgm_check_table[0x10] = &do_dat_exception;
  587. pgm_check_table[0x11] = &do_dat_exception;
  588. pgm_check_table[0x12] = &translation_exception;
  589. pgm_check_table[0x13] = &special_op_exception;
  590. #ifdef CONFIG_64BIT
  591. pgm_check_table[0x38] = &do_asce_exception;
  592. pgm_check_table[0x39] = &do_dat_exception;
  593. pgm_check_table[0x3A] = &do_dat_exception;
  594. pgm_check_table[0x3B] = &do_dat_exception;
  595. #endif /* CONFIG_64BIT */
  596. pgm_check_table[0x15] = &operand_exception;
  597. pgm_check_table[0x1C] = &space_switch_exception;
  598. pgm_check_table[0x1D] = &hfp_sqrt_exception;
  599. /* Enable machine checks early. */
  600. local_mcck_enable();
  601. }