/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *               Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *
 *  Derived from "arch/i386/kernel/traps.c"
 *    Copyright (C) 1991, 1992 Linus Torvalds
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/reboot.h>
#include <linux/kprobes.h>
#include <linux/bug.h>
#include <linux/utsname.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <linux/atomic.h>
#include <asm/mathemu.h>
#include <asm/cpcmd.h>
#include <asm/lowcore.h>
#include <asm/debug.h>
#include <asm/ipl.h>
#include "entry.h"
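
/*
 * Dispatch table for program checks: one handler per program interruption
 * code (0..127). trap_init() below first points every entry at
 * default_trap_handler() and then overrides the codes it knows about.
 */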
void (*pgm_check_table[128])(struct pt_regs *regs);

int show_unhandled_signals = 1;

#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })

#ifndef CONFIG_64BIT
#define LONG "%08lx "
#define FOURLONG "%08lx %08lx %08lx %08lx\n"
static int kstack_depth_to_print = 12;
#else /* CONFIG_64BIT */
#define LONG "%016lx "
#define FOURLONG "%016lx %016lx %016lx %016lx\n"
static int kstack_depth_to_print = 20;
#endif /* CONFIG_64BIT */
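
/*
 * Return the address of the instruction that caused the trap: the upper
 * halfword of int_code holds the instruction length, which is subtracted
 * from the PSW address. On 64 bit, if bit 0x0200 of the interruption code
 * is set, the address is taken from the saved transaction diagnostic
 * block (trap_tdb) instead of the PSW.
 */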
static inline void __user *get_trap_ip(struct pt_regs *regs)
{
#ifdef CONFIG_64BIT
        unsigned long address;

        if (regs->int_code & 0x200)
                address = *(unsigned long *)(current->thread.trap_tdb + 24);
        else
                address = regs->psw.addr;
        return (void __user *)
                ((address - (regs->int_code >> 16)) & PSW_ADDR_INSN);
#else
        return (void __user *)
                ((regs->psw.addr - (regs->int_code >> 16)) & PSW_ADDR_INSN);
#endif
}

/*
 * For show_trace we have three different stacks to consider:
 *   - the panic stack which is used if the kernel stack has overflowed
 *   - the asynchronous interrupt stack (cpu related)
 *   - the synchronous kernel stack (process related)
 * The stack trace can start at any of the three stacks and can potentially
 * touch all of them. The order is: panic stack, async stack, sync stack.
 */
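/*
 * Walk a single stack area between low and high: print the saved return
 * address of each frame, follow the frame backchain, and when a zero
 * backchain is found check for a pt_regs area left by an interrupt.
 * Returns the stack pointer at which the walk left this area.
 */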
static unsigned long
__show_trace(unsigned long sp, unsigned long low, unsigned long high)
{
        struct stack_frame *sf;
        struct pt_regs *regs;

        while (1) {
                sp = sp & PSW_ADDR_INSN;
                if (sp < low || sp > high - sizeof(*sf))
                        return sp;
                sf = (struct stack_frame *) sp;
                printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
                print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN);
                /* Follow the backchain. */
                while (1) {
                        low = sp;
                        sp = sf->back_chain & PSW_ADDR_INSN;
                        if (!sp)
                                break;
                        if (sp <= low || sp > high - sizeof(*sf))
                                return sp;
                        sf = (struct stack_frame *) sp;
                        printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
                        print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN);
                }
                /* Zero backchain detected, check for interrupt frame. */
                sp = (unsigned long) (sf + 1);
                if (sp <= low || sp > high - sizeof(*regs))
                        return sp;
                regs = (struct pt_regs *) sp;
                printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN);
                print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN);
                low = sp;
                sp = regs->gprs[15];
        }
}

static void show_trace(struct task_struct *task, unsigned long *stack)
{
        register unsigned long __r15 asm ("15");
        unsigned long sp;

        sp = (unsigned long) stack;
        if (!sp)
                sp = task ? task->thread.ksp : __r15;
        printk("Call Trace:\n");
#ifdef CONFIG_CHECK_STACK
        sp = __show_trace(sp, S390_lowcore.panic_stack - 4096,
                          S390_lowcore.panic_stack);
#endif
        sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE,
                          S390_lowcore.async_stack);
        if (task)
                __show_trace(sp, (unsigned long) task_stack_page(task),
                             (unsigned long) task_stack_page(task) + THREAD_SIZE);
        else
                __show_trace(sp, S390_lowcore.thread_info,
                             S390_lowcore.thread_info + THREAD_SIZE);
        if (!task)
                task = current;
        debug_show_held_locks(task);
}
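
/*
 * Dump the raw contents of the given stack (up to kstack_depth_to_print
 * longs, stopping at the stack page boundary) and then print the call
 * trace starting from the same stack pointer.
 */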
void show_stack(struct task_struct *task, unsigned long *sp)
{
        register unsigned long *__r15 asm ("15");
        unsigned long *stack;
        int i;

        if (!sp)
                stack = task ? (unsigned long *) task->thread.ksp : __r15;
        else
                stack = sp;

        for (i = 0; i < kstack_depth_to_print; i++) {
                if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
                        break;
                if ((i * sizeof(long) % 32) == 0)
                        printk("%s ", i == 0 ? "" : "\n");
                printk(LONG, *stack++);
        }
        printk("\n");
        show_trace(task, sp);
}

static void show_last_breaking_event(struct pt_regs *regs)
{
#ifdef CONFIG_64BIT
        printk("Last Breaking-Event-Address:\n");
        printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN);
        print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN);
#endif
}

/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
        printk("CPU: %d %s %s %.*s\n",
               task_thread_info(current)->cpu, print_tainted(),
               init_utsname()->release,
               (int)strcspn(init_utsname()->version, " "),
               init_utsname()->version);
        printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
               current->comm, current->pid, current,
               (void *) current->thread.ksp);
        show_stack(NULL, NULL);
}
EXPORT_SYMBOL(dump_stack);
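
/*
 * Extract the bit field selected by "bits" from the PSW mask: the AND
 * isolates the field and the division by the lowest set bit of the mask
 * shifts it down, so single- and multi-bit fields print as small integers.
 */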
static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
{
        return (regs->psw.mask & bits) / ((~bits + 1) & bits);
}

void show_registers(struct pt_regs *regs)
{
        char *mode;

        mode = user_mode(regs) ? "User" : "Krnl";
        printk("%s PSW : %p %p",
               mode, (void *) regs->psw.mask,
               (void *) regs->psw.addr);
        print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN);
        printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
               "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER),
               mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO),
               mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY),
               mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT),
               mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC),
               mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM));
#ifdef CONFIG_64BIT
        printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA));
#endif
        printk("\n%s GPRS: " FOURLONG, mode,
               regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
        printk(" " FOURLONG,
               regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
        printk(" " FOURLONG,
               regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
        printk(" " FOURLONG,
               regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);
        show_code(regs);
}

void show_regs(struct pt_regs *regs)
{
        printk("CPU: %d %s %s %.*s\n",
               task_thread_info(current)->cpu, print_tainted(),
               init_utsname()->release,
               (int)strcspn(init_utsname()->version, " "),
               init_utsname()->version);
        printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
               current->comm, current->pid, current,
               (void *) current->thread.ksp);
        show_registers(regs);
        /* Show stack backtrace if pt_regs is from kernel mode */
        if (!user_mode(regs))
                show_trace(NULL, (unsigned long *) regs->gprs[15]);
        show_last_breaking_event(regs);
}
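
/*
 * die_lock serializes concurrent oopses; die() prints the oops banner,
 * the loaded modules and the register state, then either panics (when in
 * interrupt context or with panic_on_oops set) or terminates the current
 * task with SIGSEGV.
 */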
static DEFINE_SPINLOCK(die_lock);

void die(struct pt_regs *regs, const char *str)
{
        static int die_counter;

        oops_enter();
        lgr_info_log();
        debug_stop_all();
        console_verbose();
        spin_lock_irq(&die_lock);
        bust_spinlocks(1);
        printk("%s: %04x [#%d] ", str, regs->int_code & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
        printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
        printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
        printk("DEBUG_PAGEALLOC");
#endif
        printk("\n");
        notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV);
        print_modules();
        show_regs(regs);
        bust_spinlocks(0);
        add_taint(TAINT_DIE);
        spin_unlock_irq(&die_lock);
        if (in_interrupt())
                panic("Fatal exception in interrupt");
        if (panic_on_oops)
                panic("Fatal exception: panic_on_oops");
        oops_exit();
        do_exit(SIGSEGV);
}

static inline void report_user_fault(struct pt_regs *regs, int signr)
{
        if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
                return;
        if (!unhandled_signal(current, signr))
                return;
        if (!printk_ratelimit())
                return;
        printk("User process fault: interruption code 0x%X ", regs->int_code);
        print_vma_addr("in ", regs->psw.addr & PSW_ADDR_INSN);
        printk("\n");
        show_regs(regs);
}

int is_valid_bugaddr(unsigned long addr)
{
        return 1;
}
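
/*
 * Common trap delivery: in user mode send the signal with a filled-in
 * siginfo and report the fault; in kernel mode try an exception table
 * fixup or the bug table (WARN) before calling die().
 */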
static void __kprobes do_trap(struct pt_regs *regs,
                              int si_signo, int si_code, char *str)
{
        siginfo_t info;

        if (notify_die(DIE_TRAP, str, regs, 0,
                       regs->int_code, si_signo) == NOTIFY_STOP)
                return;

        if (user_mode(regs)) {
                info.si_signo = si_signo;
                info.si_errno = 0;
                info.si_code = si_code;
                info.si_addr = get_trap_ip(regs);
                force_sig_info(si_signo, &info, current);
                report_user_fault(regs, si_signo);
        } else {
                const struct exception_table_entry *fixup;
                fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
                if (fixup)
                        regs->psw.addr = extable_fixup(fixup) | PSW_ADDR_AMODE;
                else {
                        enum bug_trap_type btt;

                        btt = report_bug(regs->psw.addr & PSW_ADDR_INSN, regs);
                        if (btt == BUG_TRAP_TYPE_WARN)
                                return;
                        die(regs, str);
                }
        }
}

void __kprobes do_per_trap(struct pt_regs *regs)
{
        siginfo_t info;

        if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, SIGTRAP) == NOTIFY_STOP)
                return;
        if (!current->ptrace)
                return;
        info.si_signo = SIGTRAP;
        info.si_errno = 0;
        info.si_code = TRAP_HWBKPT;
        info.si_addr =
                (void __force __user *) current->thread.per_event.address;
        force_sig_info(SIGTRAP, &info, current);
}

static void default_trap_handler(struct pt_regs *regs)
{
        if (user_mode(regs)) {
                report_user_fault(regs, SIGSEGV);
                do_exit(SIGSEGV);
        } else
                die(regs, "Unknown program exception");
}
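
/*
 * Generate a trivial handler that forwards the program check to do_trap()
 * with a fixed signal number, si_code and message.
 */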
#define DO_ERROR_INFO(name, signr, sicode, str) \
static void name(struct pt_regs *regs) \
{ \
        do_trap(regs, signr, sicode, str); \
}

DO_ERROR_INFO(addressing_exception, SIGILL, ILL_ILLADR,
              "addressing exception")
DO_ERROR_INFO(execute_exception, SIGILL, ILL_ILLOPN,
              "execute exception")
DO_ERROR_INFO(divide_exception, SIGFPE, FPE_INTDIV,
              "fixpoint divide exception")
DO_ERROR_INFO(overflow_exception, SIGFPE, FPE_INTOVF,
              "fixpoint overflow exception")
DO_ERROR_INFO(hfp_overflow_exception, SIGFPE, FPE_FLTOVF,
              "HFP overflow exception")
DO_ERROR_INFO(hfp_underflow_exception, SIGFPE, FPE_FLTUND,
              "HFP underflow exception")
DO_ERROR_INFO(hfp_significance_exception, SIGFPE, FPE_FLTRES,
              "HFP significance exception")
DO_ERROR_INFO(hfp_divide_exception, SIGFPE, FPE_FLTDIV,
              "HFP divide exception")
DO_ERROR_INFO(hfp_sqrt_exception, SIGFPE, FPE_FLTINV,
              "HFP square root exception")
DO_ERROR_INFO(operand_exception, SIGILL, ILL_ILLOPN,
              "operand exception")
DO_ERROR_INFO(privileged_op, SIGILL, ILL_PRVOPC,
              "privileged operation")
DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN,
              "special operation exception")
DO_ERROR_INFO(translation_exception, SIGILL, ILL_ILLOPN,
              "translation exception")

#ifdef CONFIG_64BIT
DO_ERROR_INFO(transaction_exception, SIGILL, ILL_ILLOPN,
              "transaction constraint exception")
#endif
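
/*
 * Map the IEEE data-exception code from the FPC register to a SIGFPE
 * si_code and deliver the floating point exception via do_trap().
 */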
static inline void do_fp_trap(struct pt_regs *regs, int fpc)
{
        int si_code = 0;

        /* FPC[2] is Data Exception Code */
        if ((fpc & 0x00000300) == 0) {
                /* bits 6 and 7 of DXC are 0 iff IEEE exception */
                if (fpc & 0x8000) /* invalid fp operation */
                        si_code = FPE_FLTINV;
                else if (fpc & 0x4000) /* div by 0 */
                        si_code = FPE_FLTDIV;
                else if (fpc & 0x2000) /* overflow */
                        si_code = FPE_FLTOVF;
                else if (fpc & 0x1000) /* underflow */
                        si_code = FPE_FLTUND;
                else if (fpc & 0x0800) /* inexact */
                        si_code = FPE_FLTRES;
        }
        do_trap(regs, SIGFPE, si_code, "floating point exception");
}
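
/*
 * Program check 0x01, operation exception: a ptrace breakpoint opcode in
 * user space becomes SIGTRAP, optional math emulation handles selected
 * floating point opcodes, anything else is an illegal operation; in kernel
 * mode the exception is first offered to the kprobes breakpoint notifier.
 */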
static void __kprobes illegal_op(struct pt_regs *regs)
{
        siginfo_t info;
        __u8 opcode[6];
        __u16 __user *location;
        int signal = 0;

        location = get_trap_ip(regs);

        if (user_mode(regs)) {
                if (get_user(*((__u16 *) opcode), (__u16 __user *) location))
                        return;
                if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) {
                        if (current->ptrace) {
                                info.si_signo = SIGTRAP;
                                info.si_errno = 0;
                                info.si_code = TRAP_BRKPT;
                                info.si_addr = location;
                                force_sig_info(SIGTRAP, &info, current);
                        } else
                                signal = SIGILL;
#ifdef CONFIG_MATHEMU
                } else if (opcode[0] == 0xb3) {
                        if (get_user(*((__u16 *) (opcode+2)), location+1))
                                return;
                        signal = math_emu_b3(opcode, regs);
                } else if (opcode[0] == 0xed) {
                        if (get_user(*((__u32 *) (opcode+2)),
                                     (__u32 __user *)(location+1)))
                                return;
                        signal = math_emu_ed(opcode, regs);
                } else if (*((__u16 *) opcode) == 0xb299) {
                        if (get_user(*((__u16 *) (opcode+2)), location+1))
                                return;
                        signal = math_emu_srnm(opcode, regs);
                } else if (*((__u16 *) opcode) == 0xb29c) {
                        if (get_user(*((__u16 *) (opcode+2)), location+1))
                                return;
                        signal = math_emu_stfpc(opcode, regs);
                } else if (*((__u16 *) opcode) == 0xb29d) {
                        if (get_user(*((__u16 *) (opcode+2)), location+1))
                                return;
                        signal = math_emu_lfpc(opcode, regs);
#endif
                } else
                        signal = SIGILL;
        } else {
                /*
                 * If we get an illegal op in kernel mode, send it through the
                 * kprobes notifier. If kprobes doesn't pick it up, SIGILL
                 */
                if (notify_die(DIE_BPT, "bpt", regs, 0,
                               3, SIGTRAP) != NOTIFY_STOP)
                        signal = SIGILL;
        }

#ifdef CONFIG_MATHEMU
        if (signal == SIGFPE)
                do_fp_trap(regs, current->thread.fp_regs.fpc);
        else if (signal == SIGSEGV)
                do_trap(regs, signal, SEGV_MAPERR, "user address fault");
        else
#endif
        if (signal)
                do_trap(regs, signal, ILL_ILLOPC, "illegal operation");
}

#ifdef CONFIG_MATHEMU
void specification_exception(struct pt_regs *regs)
{
        __u8 opcode[6];
        __u16 __user *location = NULL;
        int signal = 0;

        location = (__u16 __user *) get_trap_ip(regs);

        if (user_mode(regs)) {
                get_user(*((__u16 *) opcode), location);
                switch (opcode[0]) {
                case 0x28: /* LDR Rx,Ry */
                        signal = math_emu_ldr(opcode);
                        break;
                case 0x38: /* LER Rx,Ry */
                        signal = math_emu_ler(opcode);
                        break;
                case 0x60: /* STD R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_std(opcode, regs);
                        break;
                case 0x68: /* LD R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_ld(opcode, regs);
                        break;
                case 0x70: /* STE R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_ste(opcode, regs);
                        break;
                case 0x78: /* LE R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_le(opcode, regs);
                        break;
                default:
                        signal = SIGILL;
                        break;
                }
        } else
                signal = SIGILL;

        if (signal == SIGFPE)
                do_fp_trap(regs, current->thread.fp_regs.fpc);
        else if (signal)
                do_trap(regs, signal, ILL_ILLOPN, "specification exception");
}
#else
DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN,
              "specification exception");
#endif
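
/*
 * Program check 0x07, data exception: store the current FPC if the machine
 * has IEEE floating point (or emulate the faulting instruction with
 * CONFIG_MATHEMU), then deliver SIGFPE if a data-exception code is pending
 * in the FPC and SIGILL otherwise.
 */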
static void data_exception(struct pt_regs *regs)
{
        __u16 __user *location;
        int signal = 0;

        location = get_trap_ip(regs);

        if (MACHINE_HAS_IEEE)
                asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));
#ifdef CONFIG_MATHEMU
        else if (user_mode(regs)) {
                __u8 opcode[6];
                get_user(*((__u16 *) opcode), location);
                switch (opcode[0]) {
                case 0x28: /* LDR Rx,Ry */
                        signal = math_emu_ldr(opcode);
                        break;
                case 0x38: /* LER Rx,Ry */
                        signal = math_emu_ler(opcode);
                        break;
                case 0x60: /* STD R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_std(opcode, regs);
                        break;
                case 0x68: /* LD R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_ld(opcode, regs);
                        break;
                case 0x70: /* STE R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_ste(opcode, regs);
                        break;
                case 0x78: /* LE R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_le(opcode, regs);
                        break;
                case 0xb3:
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_b3(opcode, regs);
                        break;
                case 0xed:
                        get_user(*((__u32 *) (opcode+2)),
                                 (__u32 __user *)(location+1));
                        signal = math_emu_ed(opcode, regs);
                        break;
                case 0xb2:
                        if (opcode[1] == 0x99) {
                                get_user(*((__u16 *) (opcode+2)), location+1);
                                signal = math_emu_srnm(opcode, regs);
                        } else if (opcode[1] == 0x9c) {
                                get_user(*((__u16 *) (opcode+2)), location+1);
                                signal = math_emu_stfpc(opcode, regs);
                        } else if (opcode[1] == 0x9d) {
                                get_user(*((__u16 *) (opcode+2)), location+1);
                                signal = math_emu_lfpc(opcode, regs);
                        } else
                                signal = SIGILL;
                        break;
                default:
                        signal = SIGILL;
                        break;
                }
        }
#endif
        if (current->thread.fp_regs.fpc & FPC_DXC_MASK)
                signal = SIGFPE;
        else
                signal = SIGILL;
        if (signal == SIGFPE)
                do_fp_trap(regs, current->thread.fp_regs.fpc);
        else if (signal)
                do_trap(regs, signal, ILL_ILLOPN, "data exception");
}

static void space_switch_exception(struct pt_regs *regs)
{
        /* Set user psw back to home space mode. */
        if (user_mode(regs))
                regs->psw.mask |= PSW_ASC_HOME;
        /* Send SIGILL. */
        do_trap(regs, SIGILL, ILL_PRVOPC, "space switch event");
}

void __kprobes kernel_stack_overflow(struct pt_regs *regs)
{
        bust_spinlocks(1);
        printk("Kernel stack overflow.\n");
        show_regs(regs);
        bust_spinlocks(0);
        panic("Corrupt kernel stack, can't continue.");
}

/* init is done in lowcore.S and head.S */
void __init trap_init(void)
{
        int i;

        for (i = 0; i < 128; i++)
                pgm_check_table[i] = &default_trap_handler;
        pgm_check_table[1] = &illegal_op;
        pgm_check_table[2] = &privileged_op;
        pgm_check_table[3] = &execute_exception;
        pgm_check_table[4] = &do_protection_exception;
        pgm_check_table[5] = &addressing_exception;
        pgm_check_table[6] = &specification_exception;
        pgm_check_table[7] = &data_exception;
        pgm_check_table[8] = &overflow_exception;
        pgm_check_table[9] = &divide_exception;
        pgm_check_table[0x0A] = &overflow_exception;
        pgm_check_table[0x0B] = &divide_exception;
        pgm_check_table[0x0C] = &hfp_overflow_exception;
        pgm_check_table[0x0D] = &hfp_underflow_exception;
        pgm_check_table[0x0E] = &hfp_significance_exception;
        pgm_check_table[0x0F] = &hfp_divide_exception;
        pgm_check_table[0x10] = &do_dat_exception;
        pgm_check_table[0x11] = &do_dat_exception;
        pgm_check_table[0x12] = &translation_exception;
        pgm_check_table[0x13] = &special_op_exception;
#ifdef CONFIG_64BIT
        pgm_check_table[0x18] = &transaction_exception;
        pgm_check_table[0x38] = &do_asce_exception;
        pgm_check_table[0x39] = &do_dat_exception;
        pgm_check_table[0x3A] = &do_dat_exception;
        pgm_check_table[0x3B] = &do_dat_exception;
#endif /* CONFIG_64BIT */
        pgm_check_table[0x15] = &operand_exception;
        pgm_check_table[0x1C] = &space_switch_exception;
        pgm_check_table[0x1D] = &hfp_sqrt_exception;
        /* Enable machine checks early. */
        local_mcck_enable();
}