/*
 *  arch/s390/kernel/traps.c
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *               Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *
 *  Derived from "arch/i386/kernel/traps.c"
 *    Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/reboot.h>
#include <linux/kprobes.h>
#include <linux/bug.h>
#include <linux/utsname.h>
#include <linux/atomic.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/mathemu.h>
#include <asm/cpcmd.h>
#include <asm/lowcore.h>
#include <asm/debug.h>
#include <asm/ipl.h>
#include "entry.h"
/* Program-check handler table, indexed by program interruption code (0-127). */
void (*pgm_check_table[128])(struct pt_regs *regs);

/* Sysctl toggle: also log faults of user processes killed by unhandled signals. */
int show_unhandled_signals = 1;

/* Current kernel stack pointer: on s390 the stack pointer lives in %r15. */
#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })
#ifndef CONFIG_64BIT
#define LONG "%08lx "		/* 31-bit: one word = 8 hex digits */
#define FOURLONG "%08lx %08lx %08lx %08lx\n"
/* Number of stack words dumped by show_stack(). */
static int kstack_depth_to_print = 12;
#else /* CONFIG_64BIT */
#define LONG "%016lx "		/* 64-bit: one word = 16 hex digits */
#define FOURLONG "%016lx %016lx %016lx %016lx\n"
static int kstack_depth_to_print = 20;
#endif /* CONFIG_64BIT */
/*
 * For show_trace we have three different stacks to consider:
 *   - the panic stack which is used if the kernel stack has overflown
 *   - the asynchronous interrupt stack (cpu related)
 *   - the synchronous kernel stack (process related)
 * The stack trace can start at any of the three stacks and can potentially
 * touch all of them. The order is: panic stack, async stack, sync stack.
 */
static unsigned long
__show_trace(unsigned long sp, unsigned long low, unsigned long high)
{
	struct stack_frame *sf;
	struct pt_regs *regs;

	while (1) {
		sp = sp & PSW_ADDR_INSN;
		/* Stop once sp leaves the [low, high - frame] window. */
		if (sp < low || sp > high - sizeof(*sf))
			return sp;
		sf = (struct stack_frame *) sp;
		/* sf->gprs[8] is the frame's saved return address (printed as trace entry). */
		printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
		print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN);
		/* Follow the backchain. */
		while (1) {
			low = sp;
			sp = sf->back_chain & PSW_ADDR_INSN;
			if (!sp)
				break;
			/* Backchain must move strictly upward and stay in window. */
			if (sp <= low || sp > high - sizeof(*sf))
				return sp;
			sf = (struct stack_frame *) sp;
			printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
			print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN);
		}
		/* Zero backchain detected, check for interrupt frame. */
		sp = (unsigned long) (sf + 1);
		if (sp <= low || sp > high - sizeof(*regs))
			return sp;
		regs = (struct pt_regs *) sp;
		printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN);
		print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN);
		/* Resume the walk on the interrupted context's stack pointer. */
		low = sp;
		sp = regs->gprs[15];
	}
}
/*
 * Print a call trace starting at @stack (or at the task's / the current
 * stack pointer when @stack is NULL), visiting panic, async and sync
 * stacks in that order via __show_trace().
 */
static void show_trace(struct task_struct *task, unsigned long *stack)
{
	/* Bind to %r15 so we can fall back to the live stack pointer. */
	register unsigned long __r15 asm ("15");
	unsigned long sp;

	sp = (unsigned long) stack;
	if (!sp)
		sp = task ? task->thread.ksp : __r15;
	printk("Call Trace:\n");
#ifdef CONFIG_CHECK_STACK
	sp = __show_trace(sp, S390_lowcore.panic_stack - 4096,
			  S390_lowcore.panic_stack);
#endif
	sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE,
			  S390_lowcore.async_stack);
	if (task)
		__show_trace(sp, (unsigned long) task_stack_page(task),
			     (unsigned long) task_stack_page(task) + THREAD_SIZE);
	else
		__show_trace(sp, S390_lowcore.thread_info,
			     S390_lowcore.thread_info + THREAD_SIZE);
	if (!task)
		task = current;
	debug_show_held_locks(task);
}
  124. void show_stack(struct task_struct *task, unsigned long *sp)
  125. {
  126. register unsigned long * __r15 asm ("15");
  127. unsigned long *stack;
  128. int i;
  129. if (!sp)
  130. stack = task ? (unsigned long *) task->thread.ksp : __r15;
  131. else
  132. stack = sp;
  133. for (i = 0; i < kstack_depth_to_print; i++) {
  134. if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
  135. break;
  136. if ((i * sizeof(long) % 32) == 0)
  137. printk("%s ", i == 0 ? "" : "\n");
  138. printk(LONG, *stack++);
  139. }
  140. printk("\n");
  141. show_trace(task, sp);
  142. }
  143. static void show_last_breaking_event(struct pt_regs *regs)
  144. {
  145. #ifdef CONFIG_64BIT
  146. printk("Last Breaking-Event-Address:\n");
  147. printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN);
  148. print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN);
  149. #endif
  150. }
  151. /*
  152. * The architecture-independent dump_stack generator
  153. */
  154. void dump_stack(void)
  155. {
  156. printk("CPU: %d %s %s %.*s\n",
  157. task_thread_info(current)->cpu, print_tainted(),
  158. init_utsname()->release,
  159. (int)strcspn(init_utsname()->version, " "),
  160. init_utsname()->version);
  161. printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
  162. current->comm, current->pid, current,
  163. (void *) current->thread.ksp);
  164. show_stack(NULL, NULL);
  165. }
  166. EXPORT_SYMBOL(dump_stack);
  167. static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
  168. {
  169. return (regs->psw.mask & bits) / ((~bits + 1) & bits);
  170. }
/*
 * Dump the PSW, its decoded flag fields and all 16 GPRs of @regs, then
 * disassemble the code around the PSW address via show_code().
 */
void show_registers(struct pt_regs *regs)
{
	char *mode;

	/* Problem-state bit distinguishes user from kernel context. */
	mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl";
	printk("%s PSW : %p %p",
	       mode, (void *) regs->psw.mask,
	       (void *) regs->psw.addr);
	print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN);
	/* Decode the individual PSW mask fields via mask_bits(). */
	printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
	       "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER),
	       mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO),
	       mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY),
	       mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT),
	       mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC),
	       mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM));
#ifdef CONFIG_64BIT
	printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA));
#endif
	printk("\n%s GPRS: " FOURLONG, mode,
	       regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
	printk(" " FOURLONG,
	       regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
	printk(" " FOURLONG,
	       regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
	printk(" " FOURLONG,
	       regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);
	show_code(regs);
}
  199. void show_regs(struct pt_regs *regs)
  200. {
  201. print_modules();
  202. printk("CPU: %d %s %s %.*s\n",
  203. task_thread_info(current)->cpu, print_tainted(),
  204. init_utsname()->release,
  205. (int)strcspn(init_utsname()->version, " "),
  206. init_utsname()->version);
  207. printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
  208. current->comm, current->pid, current,
  209. (void *) current->thread.ksp);
  210. show_registers(regs);
  211. /* Show stack backtrace if pt_regs is from kernel mode */
  212. if (!(regs->psw.mask & PSW_MASK_PSTATE))
  213. show_trace(NULL, (unsigned long *) regs->gprs[15]);
  214. show_last_breaking_event(regs);
  215. }
static DEFINE_SPINLOCK(die_lock);

/*
 * Fatal-fault path: serialize oopses via die_lock, dump the full machine
 * state, then terminate the current task - or panic outright when the
 * fault happened in interrupt context or panic_on_oops is set.
 * Does not return.
 */
void die(struct pt_regs *regs, const char *str)
{
	static int die_counter;	/* numbers successive oopses: [#1], [#2], ... */

	oops_enter();
	lgr_info_log();
	debug_stop_all();	/* freeze the s390 debug feature buffers */
	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);	/* make sure the oops text reaches the console */
	printk("%s: %04x [#%d] ", str, regs->int_code & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("DEBUG_PAGEALLOC");
#endif
	printk("\n");
	notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV);
	show_regs(regs);
	bust_spinlocks(0);
	add_taint(TAINT_DIE);
	spin_unlock_irq(&die_lock);
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception: panic_on_oops");
	oops_exit();
	do_exit(SIGSEGV);
}
  249. static inline void report_user_fault(struct pt_regs *regs, int signr)
  250. {
  251. if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
  252. return;
  253. if (!unhandled_signal(current, signr))
  254. return;
  255. if (!printk_ratelimit())
  256. return;
  257. printk("User process fault: interruption code 0x%X ", regs->int_code);
  258. print_vma_addr("in ", regs->psw.addr & PSW_ADDR_INSN);
  259. printk("\n");
  260. show_regs(regs);
  261. }
  262. int is_valid_bugaddr(unsigned long addr)
  263. {
  264. return 1;
  265. }
  266. static inline void __user *get_psw_address(struct pt_regs *regs)
  267. {
  268. return (void __user *)
  269. ((regs->psw.addr - (regs->int_code >> 16)) & PSW_ADDR_INSN);
  270. }
/*
 * Common trap dispatcher.  User-mode faults deliver @si_signo to the
 * task (and are optionally logged); kernel-mode faults first try an
 * exception-table fixup, then the BUG/WARN machinery, and finally die().
 */
static void __kprobes do_trap(struct pt_regs *regs,
			      int si_signo, int si_code, char *str)
{
	siginfo_t info;

	/* Give registered notifiers (kprobes, kgdb, ...) first refusal. */
	if (notify_die(DIE_TRAP, str, regs, 0,
		       regs->int_code, si_signo) == NOTIFY_STOP)
		return;
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		info.si_signo = si_signo;
		info.si_errno = 0;
		info.si_code = si_code;
		info.si_addr = get_psw_address(regs);
		force_sig_info(si_signo, &info, current);
		report_user_fault(regs, si_signo);
	} else {
		const struct exception_table_entry *fixup;
		fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
		if (fixup)
			/* Resume at the fixup handler instead of oopsing. */
			regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
		else {
			enum bug_trap_type btt;
			btt = report_bug(regs->psw.addr & PSW_ADDR_INSN, regs);
			if (btt == BUG_TRAP_TYPE_WARN)
				return;	/* WARN_ON(): keep running */
			die(regs, str);
		}
	}
}
  299. void __kprobes do_per_trap(struct pt_regs *regs)
  300. {
  301. siginfo_t info;
  302. if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, SIGTRAP) == NOTIFY_STOP)
  303. return;
  304. if (!current->ptrace)
  305. return;
  306. info.si_signo = SIGTRAP;
  307. info.si_errno = 0;
  308. info.si_code = TRAP_HWBKPT;
  309. info.si_addr =
  310. (void __force __user *) current->thread.per_event.address;
  311. force_sig_info(SIGTRAP, &info, current);
  312. }
  313. static void default_trap_handler(struct pt_regs *regs)
  314. {
  315. if (regs->psw.mask & PSW_MASK_PSTATE) {
  316. report_user_fault(regs, SIGSEGV);
  317. do_exit(SIGSEGV);
  318. } else
  319. die(regs, "Unknown program exception");
  320. }
/*
 * Boilerplate generator: define a trap handler <name> that forwards to
 * do_trap() with a fixed signal number, si_code and message string.
 */
#define DO_ERROR_INFO(name, signr, sicode, str) \
static void name(struct pt_regs *regs) \
{ \
	do_trap(regs, signr, sicode, str); \
}

DO_ERROR_INFO(addressing_exception, SIGILL, ILL_ILLADR,
	      "addressing exception")
DO_ERROR_INFO(execute_exception, SIGILL, ILL_ILLOPN,
	      "execute exception")
DO_ERROR_INFO(divide_exception, SIGFPE, FPE_INTDIV,
	      "fixpoint divide exception")
DO_ERROR_INFO(overflow_exception, SIGFPE, FPE_INTOVF,
	      "fixpoint overflow exception")
DO_ERROR_INFO(hfp_overflow_exception, SIGFPE, FPE_FLTOVF,
	      "HFP overflow exception")
DO_ERROR_INFO(hfp_underflow_exception, SIGFPE, FPE_FLTUND,
	      "HFP underflow exception")
DO_ERROR_INFO(hfp_significance_exception, SIGFPE, FPE_FLTRES,
	      "HFP significance exception")
DO_ERROR_INFO(hfp_divide_exception, SIGFPE, FPE_FLTDIV,
	      "HFP divide exception")
DO_ERROR_INFO(hfp_sqrt_exception, SIGFPE, FPE_FLTINV,
	      "HFP square root exception")
DO_ERROR_INFO(operand_exception, SIGILL, ILL_ILLOPN,
	      "operand exception")
DO_ERROR_INFO(privileged_op, SIGILL, ILL_PRVOPC,
	      "privileged operation")
DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN,
	      "special operation exception")
DO_ERROR_INFO(translation_exception, SIGILL, ILL_ILLOPN,
	      "translation exception")
  352. static inline void do_fp_trap(struct pt_regs *regs, int fpc)
  353. {
  354. int si_code = 0;
  355. /* FPC[2] is Data Exception Code */
  356. if ((fpc & 0x00000300) == 0) {
  357. /* bits 6 and 7 of DXC are 0 iff IEEE exception */
  358. if (fpc & 0x8000) /* invalid fp operation */
  359. si_code = FPE_FLTINV;
  360. else if (fpc & 0x4000) /* div by 0 */
  361. si_code = FPE_FLTDIV;
  362. else if (fpc & 0x2000) /* overflow */
  363. si_code = FPE_FLTOVF;
  364. else if (fpc & 0x1000) /* underflow */
  365. si_code = FPE_FLTUND;
  366. else if (fpc & 0x0800) /* inexact */
  367. si_code = FPE_FLTRES;
  368. }
  369. do_trap(regs, SIGFPE, si_code, "floating point exception");
  370. }
/*
 * Program check 0x01: operation exception (illegal opcode).  Handles the
 * ptrace breakpoint opcode, optional math emulation (CONFIG_MATHEMU) and
 * kernel-mode kprobes breakpoints; everything else becomes SIGILL.
 */
static void __kprobes illegal_op(struct pt_regs *regs)
{
	siginfo_t info;
	__u8 opcode[6];		/* instruction bytes fetched from user space */
	__u16 __user *location;
	int signal = 0;

	location = get_psw_address(regs);
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		if (get_user(*((__u16 *) opcode), (__u16 __user *) location))
			return;
		if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) {
			/* ptrace software breakpoint */
			if (current->ptrace) {
				info.si_signo = SIGTRAP;
				info.si_errno = 0;
				info.si_code = TRAP_BRKPT;
				info.si_addr = location;
				force_sig_info(SIGTRAP, &info, current);
			} else
				signal = SIGILL;
#ifdef CONFIG_MATHEMU
		} else if (opcode[0] == 0xb3) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_b3(opcode, regs);
		} else if (opcode[0] == 0xed) {
			/* 6-byte instruction: fetch remaining 4 bytes */
			if (get_user(*((__u32 *) (opcode+2)),
				     (__u32 __user *)(location+1)))
				return;
			signal = math_emu_ed(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb299) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_srnm(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb29c) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_stfpc(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb29d) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_lfpc(opcode, regs);
#endif
		} else
			signal = SIGILL;
	} else {
		/*
		 * If we get an illegal op in kernel mode, send it through the
		 * kprobes notifier. If kprobes doesn't pick it up, SIGILL
		 */
		if (notify_die(DIE_BPT, "bpt", regs, 0,
			       3, SIGTRAP) != NOTIFY_STOP)
			signal = SIGILL;
	}
#ifdef CONFIG_MATHEMU
	/* math emulation may request SIGFPE or SIGSEGV instead of SIGILL */
	if (signal == SIGFPE)
		do_fp_trap(regs, current->thread.fp_regs.fpc);
	else if (signal == SIGSEGV)
		do_trap(regs, signal, SEGV_MAPERR, "user address fault");
	else
#endif
	if (signal)
		do_trap(regs, signal, ILL_ILLOPC, "illegal operation");
}
#ifdef CONFIG_MATHEMU
/*
 * Program check 0x06: specification exception.  With math emulation the
 * faulting user-space floating point instruction is emulated; anything
 * else (or a kernel-mode fault) becomes SIGILL.
 */
void specification_exception(struct pt_regs *regs)
{
	__u8 opcode[6];
	__u16 __user *location = NULL;
	int signal = 0;

	location = (__u16 __user *) get_psw_address(regs);
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		/* NOTE(review): get_user() return values are ignored here. */
		get_user(*((__u16 *) opcode), location);
		switch (opcode[0]) {
		case 0x28: /* LDR Rx,Ry */
			signal = math_emu_ldr(opcode);
			break;
		case 0x38: /* LER Rx,Ry */
			signal = math_emu_ler(opcode);
			break;
		case 0x60: /* STD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_std(opcode, regs);
			break;
		case 0x68: /* LD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ld(opcode, regs);
			break;
		case 0x70: /* STE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ste(opcode, regs);
			break;
		case 0x78: /* LE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_le(opcode, regs);
			break;
		default:
			signal = SIGILL;
			break;
		}
	} else
		signal = SIGILL;
	if (signal == SIGFPE)
		do_fp_trap(regs, current->thread.fp_regs.fpc);
	else if (signal)
		do_trap(regs, signal, ILL_ILLOPN, "specification exception");
}
#else
/* Without math emulation a specification exception is always SIGILL. */
DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN,
	      "specification exception");
#endif
/*
 * Program check 0x07: data exception.  On IEEE-capable machines store
 * the FPC so the data-exception code (DXC) can be inspected; otherwise
 * (31-bit + CONFIG_MATHEMU) try to emulate the user-space floating
 * point instruction.
 */
static void data_exception(struct pt_regs *regs)
{
	__u16 __user *location;
	int signal = 0;

	location = get_psw_address(regs);
	if (MACHINE_HAS_IEEE)
		asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));
#ifdef CONFIG_MATHEMU
	else if (regs->psw.mask & PSW_MASK_PSTATE) {
		__u8 opcode[6];
		/* NOTE(review): get_user() return values are ignored here. */
		get_user(*((__u16 *) opcode), location);
		switch (opcode[0]) {
		case 0x28: /* LDR Rx,Ry */
			signal = math_emu_ldr(opcode);
			break;
		case 0x38: /* LER Rx,Ry */
			signal = math_emu_ler(opcode);
			break;
		case 0x60: /* STD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_std(opcode, regs);
			break;
		case 0x68: /* LD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ld(opcode, regs);
			break;
		case 0x70: /* STE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ste(opcode, regs);
			break;
		case 0x78: /* LE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_le(opcode, regs);
			break;
		case 0xb3:
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_b3(opcode, regs);
			break;
		case 0xed:
			get_user(*((__u32 *) (opcode+2)),
				 (__u32 __user *)(location+1));
			signal = math_emu_ed(opcode, regs);
			break;
		case 0xb2:
			if (opcode[1] == 0x99) {
				get_user(*((__u16 *) (opcode+2)), location+1);
				signal = math_emu_srnm(opcode, regs);
			} else if (opcode[1] == 0x9c) {
				get_user(*((__u16 *) (opcode+2)), location+1);
				signal = math_emu_stfpc(opcode, regs);
			} else if (opcode[1] == 0x9d) {
				get_user(*((__u16 *) (opcode+2)), location+1);
				signal = math_emu_lfpc(opcode, regs);
			} else
				signal = SIGILL;
			break;
		default:
			signal = SIGILL;
			break;
		}
	}
#endif
	/*
	 * NOTE(review): signal is recomputed here from the DXC bits, which
	 * overwrites any value produced by the CONFIG_MATHEMU emulation
	 * above - confirm against upstream whether that is intended.
	 */
	if (current->thread.fp_regs.fpc & FPC_DXC_MASK)
		signal = SIGFPE;
	else
		signal = SIGILL;
	if (signal == SIGFPE)
		do_fp_trap(regs, current->thread.fp_regs.fpc);
	else if (signal)
		do_trap(regs, signal, ILL_ILLOPN, "data exception");
}
  552. static void space_switch_exception(struct pt_regs *regs)
  553. {
  554. /* Set user psw back to home space mode. */
  555. if (regs->psw.mask & PSW_MASK_PSTATE)
  556. regs->psw.mask |= PSW_ASC_HOME;
  557. /* Send SIGILL. */
  558. do_trap(regs, SIGILL, ILL_PRVOPC, "space switch event");
  559. }
/*
 * Kernel stack overflow: the stack can no longer be trusted, so dump
 * what state we have and panic.  Presumably entered from the low-level
 * entry code - TODO confirm.
 */
void __kprobes kernel_stack_overflow(struct pt_regs * regs)
{
	bust_spinlocks(1);	/* make sure the message reaches the console */
	printk("Kernel stack overflow.\n");
	show_regs(regs);
	bust_spinlocks(0);
	panic("Corrupt kernel stack, can't continue.");
}
  568. /* init is done in lowcore.S and head.S */
  569. void __init trap_init(void)
  570. {
  571. int i;
  572. for (i = 0; i < 128; i++)
  573. pgm_check_table[i] = &default_trap_handler;
  574. pgm_check_table[1] = &illegal_op;
  575. pgm_check_table[2] = &privileged_op;
  576. pgm_check_table[3] = &execute_exception;
  577. pgm_check_table[4] = &do_protection_exception;
  578. pgm_check_table[5] = &addressing_exception;
  579. pgm_check_table[6] = &specification_exception;
  580. pgm_check_table[7] = &data_exception;
  581. pgm_check_table[8] = &overflow_exception;
  582. pgm_check_table[9] = &divide_exception;
  583. pgm_check_table[0x0A] = &overflow_exception;
  584. pgm_check_table[0x0B] = &divide_exception;
  585. pgm_check_table[0x0C] = &hfp_overflow_exception;
  586. pgm_check_table[0x0D] = &hfp_underflow_exception;
  587. pgm_check_table[0x0E] = &hfp_significance_exception;
  588. pgm_check_table[0x0F] = &hfp_divide_exception;
  589. pgm_check_table[0x10] = &do_dat_exception;
  590. pgm_check_table[0x11] = &do_dat_exception;
  591. pgm_check_table[0x12] = &translation_exception;
  592. pgm_check_table[0x13] = &special_op_exception;
  593. #ifdef CONFIG_64BIT
  594. pgm_check_table[0x38] = &do_asce_exception;
  595. pgm_check_table[0x39] = &do_dat_exception;
  596. pgm_check_table[0x3A] = &do_dat_exception;
  597. pgm_check_table[0x3B] = &do_dat_exception;
  598. #endif /* CONFIG_64BIT */
  599. pgm_check_table[0x15] = &operand_exception;
  600. pgm_check_table[0x1C] = &space_switch_exception;
  601. pgm_check_table[0x1D] = &hfp_sqrt_exception;
  602. /* Enable machine checks early. */
  603. local_mcck_enable();
  604. }