/*
 *  arch/s390/kernel/traps.c
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *               Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *
 *  Derived from "arch/i386/kernel/traps.c"
 *    Copyright (C) 1991, 1992 Linus Torvalds
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/reboot.h>
#include <linux/kprobes.h>
#include <linux/bug.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/mathemu.h>
#include <asm/cpcmd.h>
#include <asm/s390_ext.h>
#include <asm/lowcore.h>
#include <asm/debug.h>

/* Called from entry.S only */
extern void handle_per_exception(struct pt_regs *regs);

typedef void pgm_check_handler_t(struct pt_regs *, long);
pgm_check_handler_t *pgm_check_table[128];

#ifdef CONFIG_SYSCTL
#ifdef CONFIG_PROCESS_DEBUG
int sysctl_userprocess_debug = 1;
#else
int sysctl_userprocess_debug = 0;
#endif
#endif

extern pgm_check_handler_t do_protection_exception;
extern pgm_check_handler_t do_dat_exception;
extern pgm_check_handler_t do_monitor_call;
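
/* Read the current stack pointer (general register 15). */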
#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })

#ifndef CONFIG_64BIT
#define FOURLONG "%08lx %08lx %08lx %08lx\n"
static int kstack_depth_to_print = 12;
#else /* CONFIG_64BIT */
#define FOURLONG "%016lx %016lx %016lx %016lx\n"
static int kstack_depth_to_print = 20;
#endif /* CONFIG_64BIT */

/*
 * For show_trace we have three different stacks to consider:
 *   - the panic stack which is used if the kernel stack has overflowed
 *   - the asynchronous interrupt stack (cpu related)
 *   - the synchronous kernel stack (process related)
 * The stack trace can start at any of the three stacks and can potentially
 * touch all of them. The order is: panic stack, async stack, sync stack.
 */
static unsigned long
__show_trace(unsigned long sp, unsigned long low, unsigned long high)
{
	struct stack_frame *sf;
	struct pt_regs *regs;

	while (1) {
		sp = sp & PSW_ADDR_INSN;
		if (sp < low || sp > high - sizeof(*sf))
			return sp;
		sf = (struct stack_frame *) sp;
		printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
		print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN);
		/* Follow the backchain. */
		while (1) {
			low = sp;
			sp = sf->back_chain & PSW_ADDR_INSN;
			if (!sp)
				break;
			if (sp <= low || sp > high - sizeof(*sf))
				return sp;
			sf = (struct stack_frame *) sp;
			printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
			print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN);
		}
		/* Zero backchain detected, check for interrupt frame. */
		sp = (unsigned long) (sf + 1);
		if (sp <= low || sp > high - sizeof(*regs))
			return sp;
		regs = (struct pt_regs *) sp;
		printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN);
		print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN);
		low = sp;
		sp = regs->gprs[15];
	}
}

void show_trace(struct task_struct *task, unsigned long *stack)
{
	register unsigned long __r15 asm ("15");
	unsigned long sp;

	sp = (unsigned long) stack;
	if (!sp)
		sp = task ? task->thread.ksp : __r15;
	printk("Call Trace:\n");
#ifdef CONFIG_CHECK_STACK
	sp = __show_trace(sp, S390_lowcore.panic_stack - 4096,
			  S390_lowcore.panic_stack);
#endif
	sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE,
			  S390_lowcore.async_stack);
	if (task)
		__show_trace(sp, (unsigned long) task_stack_page(task),
			     (unsigned long) task_stack_page(task) + THREAD_SIZE);
	else
		__show_trace(sp, S390_lowcore.thread_info,
			     S390_lowcore.thread_info + THREAD_SIZE);
	printk("\n");
	if (!task)
		task = current;
	debug_show_held_locks(task);
}

void show_stack(struct task_struct *task, unsigned long *sp)
{
	register unsigned long * __r15 asm ("15");
	unsigned long *stack;
	int i;

	if (!sp)
		stack = task ? (unsigned long *) task->thread.ksp : __r15;
	else
		stack = sp;

	for (i = 0; i < kstack_depth_to_print; i++) {
		if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
			break;
		if (i && ((i * sizeof (long) % 32) == 0))
			printk("\n ");
		printk("%p ", (void *)*stack++);
	}
	printk("\n");
	show_trace(task, sp);
}

/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
	show_stack(NULL, NULL);
}
EXPORT_SYMBOL(dump_stack);
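
/*
 * Extract the PSW field selected by 'bits' and shift it down to bit 0
 * (i.e. divide by the lowest set bit of the mask).
 */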
static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
{
	return (regs->psw.mask & bits) / ((~bits + 1) & bits);
}

void show_registers(struct pt_regs *regs)
{
	char *mode;

	mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl";
	printk("%s PSW : %p %p",
	       mode, (void *) regs->psw.mask,
	       (void *) regs->psw.addr);
	print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN);
	printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
	       "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER),
	       mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO),
	       mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY),
	       mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT),
	       mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC),
	       mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM));
#ifdef CONFIG_64BIT
	printk(" EA:%x", mask_bits(regs, PSW_BASE_BITS));
#endif
	printk("\n%s GPRS: " FOURLONG, mode,
	       regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
	printk(" " FOURLONG,
	       regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
	printk(" " FOURLONG,
	       regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
	printk(" " FOURLONG,
	       regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);

	show_code(regs);
}

/* This is called from fs/proc/array.c */
char *task_show_regs(struct task_struct *task, char *buffer)
{
	struct pt_regs *regs;

	regs = task_pt_regs(task);
	buffer += sprintf(buffer, "task: %p, ksp: %p\n",
			  task, (void *)task->thread.ksp);
	buffer += sprintf(buffer, "User PSW : %p %p\n",
			  (void *) regs->psw.mask, (void *)regs->psw.addr);
	buffer += sprintf(buffer, "User GPRS: " FOURLONG,
			  regs->gprs[0], regs->gprs[1],
			  regs->gprs[2], regs->gprs[3]);
	buffer += sprintf(buffer, " " FOURLONG,
			  regs->gprs[4], regs->gprs[5],
			  regs->gprs[6], regs->gprs[7]);
	buffer += sprintf(buffer, " " FOURLONG,
			  regs->gprs[8], regs->gprs[9],
			  regs->gprs[10], regs->gprs[11]);
	buffer += sprintf(buffer, " " FOURLONG,
			  regs->gprs[12], regs->gprs[13],
			  regs->gprs[14], regs->gprs[15]);
	buffer += sprintf(buffer, "User ACRS: %08x %08x %08x %08x\n",
			  task->thread.acrs[0], task->thread.acrs[1],
			  task->thread.acrs[2], task->thread.acrs[3]);
	buffer += sprintf(buffer, " %08x %08x %08x %08x\n",
			  task->thread.acrs[4], task->thread.acrs[5],
			  task->thread.acrs[6], task->thread.acrs[7]);
	buffer += sprintf(buffer, " %08x %08x %08x %08x\n",
			  task->thread.acrs[8], task->thread.acrs[9],
			  task->thread.acrs[10], task->thread.acrs[11]);
	buffer += sprintf(buffer, " %08x %08x %08x %08x\n",
			  task->thread.acrs[12], task->thread.acrs[13],
			  task->thread.acrs[14], task->thread.acrs[15]);
	return buffer;
}

static DEFINE_SPINLOCK(die_lock);

void die(const char * str, struct pt_regs * regs, long err)
{
	static int die_counter;

	oops_enter();
	debug_stop_all();
	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);
	printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
	print_modules();
	show_regs(regs);
	bust_spinlocks(0);
	add_taint(TAINT_DIE);
	spin_unlock_irq(&die_lock);
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception: panic_on_oops");
	oops_exit();
	do_exit(SIGSEGV);
}

static void inline
report_user_fault(long interruption_code, struct pt_regs *regs)
{
#if defined(CONFIG_SYSCTL)
	if (!sysctl_userprocess_debug)
		return;
#endif
#if defined(CONFIG_SYSCTL) || defined(CONFIG_PROCESS_DEBUG)
	printk("User process fault: interruption code 0x%lX\n",
	       interruption_code);
	show_regs(regs);
#endif
}

int is_valid_bugaddr(unsigned long addr)
{
	return 1;
}

static void __kprobes inline do_trap(long interruption_code, int signr,
				     char *str, struct pt_regs *regs,
				     siginfo_t *info)
{
	/*
	 * We got all needed information from the lowcore and can
	 * now safely switch on interrupts.
	 */
	if (regs->psw.mask & PSW_MASK_PSTATE)
		local_irq_enable();

	if (notify_die(DIE_TRAP, str, regs, interruption_code,
		       interruption_code, signr) == NOTIFY_STOP)
		return;

	if (regs->psw.mask & PSW_MASK_PSTATE) {
		struct task_struct *tsk = current;

		tsk->thread.trap_no = interruption_code & 0xffff;
		force_sig_info(signr, info, tsk);
		report_user_fault(interruption_code, regs);
	} else {
		const struct exception_table_entry *fixup;

		fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
		if (fixup)
			regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
		else {
			enum bug_trap_type btt;

			btt = report_bug(regs->psw.addr & PSW_ADDR_INSN, regs);
			if (btt == BUG_TRAP_TYPE_WARN)
				return;
			die(str, regs, interruption_code);
		}
	}
}
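
/*
 * The PSW address points past the instruction that caused the program
 * check; subtracting the instruction length code saved in the lowcore
 * yields the address of the failing instruction.
 */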
static inline void __user *get_check_address(struct pt_regs *regs)
{
	return (void __user *)((regs->psw.addr-S390_lowcore.pgm_ilc) & PSW_ADDR_INSN);
}

void __kprobes do_single_step(struct pt_regs *regs)
{
	if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0,
		       SIGTRAP) == NOTIFY_STOP){
		return;
	}
	if ((current->ptrace & PT_PTRACED) != 0)
		force_sig(SIGTRAP, current);
}

static void default_trap_handler(struct pt_regs * regs, long interruption_code)
{
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		local_irq_enable();
		report_user_fault(interruption_code, regs);
		do_exit(SIGSEGV);
	} else
		die("Unknown program exception", regs, interruption_code);
}
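
/*
 * Generate a trivial handler that fills in a siginfo structure and
 * forwards it to do_trap().
 */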
#define DO_ERROR_INFO(signr, str, name, sicode, siaddr) \
static void name(struct pt_regs * regs, long interruption_code) \
{ \
	siginfo_t info; \
	info.si_signo = signr; \
	info.si_errno = 0; \
	info.si_code = sicode; \
	info.si_addr = siaddr; \
	do_trap(interruption_code, signr, str, regs, &info); \
}

DO_ERROR_INFO(SIGILL, "addressing exception", addressing_exception,
	      ILL_ILLADR, get_check_address(regs))
DO_ERROR_INFO(SIGILL, "execute exception", execute_exception,
	      ILL_ILLOPN, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "fixpoint divide exception", divide_exception,
	      FPE_INTDIV, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "fixpoint overflow exception", overflow_exception,
	      FPE_INTOVF, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "HFP overflow exception", hfp_overflow_exception,
	      FPE_FLTOVF, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "HFP underflow exception", hfp_underflow_exception,
	      FPE_FLTUND, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "HFP significance exception", hfp_significance_exception,
	      FPE_FLTRES, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "HFP divide exception", hfp_divide_exception,
	      FPE_FLTDIV, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "HFP square root exception", hfp_sqrt_exception,
	      FPE_FLTINV, get_check_address(regs))
DO_ERROR_INFO(SIGILL, "operand exception", operand_exception,
	      ILL_ILLOPN, get_check_address(regs))
DO_ERROR_INFO(SIGILL, "privileged operation", privileged_op,
	      ILL_PRVOPC, get_check_address(regs))
DO_ERROR_INFO(SIGILL, "special operation exception", special_op_exception,
	      ILL_ILLOPN, get_check_address(regs))
DO_ERROR_INFO(SIGILL, "translation exception", translation_exception,
	      ILL_ILLOPN, get_check_address(regs))

static inline void
do_fp_trap(struct pt_regs *regs, void __user *location,
	   int fpc, long interruption_code)
{
	siginfo_t si;

	si.si_signo = SIGFPE;
	si.si_errno = 0;
	si.si_addr = location;
	si.si_code = 0;
	/* FPC[2] is Data Exception Code */
	if ((fpc & 0x00000300) == 0) {
		/* bits 6 and 7 of DXC are 0 iff IEEE exception */
		if (fpc & 0x8000) /* invalid fp operation */
			si.si_code = FPE_FLTINV;
		else if (fpc & 0x4000) /* div by 0 */
			si.si_code = FPE_FLTDIV;
		else if (fpc & 0x2000) /* overflow */
			si.si_code = FPE_FLTOVF;
		else if (fpc & 0x1000) /* underflow */
			si.si_code = FPE_FLTUND;
		else if (fpc & 0x0800) /* inexact */
			si.si_code = FPE_FLTRES;
	}
	current->thread.ieee_instruction_pointer = (addr_t) location;
	do_trap(interruption_code, SIGFPE,
		"floating point exception", regs, &si);
}
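
/*
 * Illegal operation handler: user space breakpoints raise SIGTRAP,
 * unknown opcodes raise SIGILL, kernel mode breakpoints are passed to
 * the kprobes notifier, and CONFIG_MATHEMU may emulate floating point
 * opcodes.
 */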
static void illegal_op(struct pt_regs * regs, long interruption_code)
{
	siginfo_t info;
	__u8 opcode[6];
	__u16 __user *location;
	int signal = 0;

	location = get_check_address(regs);

	/*
	 * We got all needed information from the lowcore and can
	 * now safely switch on interrupts.
	 */
	if (regs->psw.mask & PSW_MASK_PSTATE)
		local_irq_enable();

	if (regs->psw.mask & PSW_MASK_PSTATE) {
		if (get_user(*((__u16 *) opcode), (__u16 __user *) location))
			return;
		if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) {
			if (current->ptrace & PT_PTRACED)
				force_sig(SIGTRAP, current);
			else
				signal = SIGILL;
#ifdef CONFIG_MATHEMU
		} else if (opcode[0] == 0xb3) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_b3(opcode, regs);
		} else if (opcode[0] == 0xed) {
			if (get_user(*((__u32 *) (opcode+2)),
				     (__u32 __user *)(location+1)))
				return;
			signal = math_emu_ed(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb299) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_srnm(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb29c) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_stfpc(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb29d) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_lfpc(opcode, regs);
#endif
		} else
			signal = SIGILL;
	} else {
		/*
		 * If we get an illegal op in kernel mode, send it through the
		 * kprobes notifier. If kprobes doesn't pick it up, SIGILL
		 */
		if (notify_die(DIE_BPT, "bpt", regs, interruption_code,
			       3, SIGTRAP) != NOTIFY_STOP)
			signal = SIGILL;
	}

#ifdef CONFIG_MATHEMU
	if (signal == SIGFPE)
		do_fp_trap(regs, location,
			   current->thread.fp_regs.fpc, interruption_code);
	else if (signal == SIGSEGV) {
		info.si_signo = signal;
		info.si_errno = 0;
		info.si_code = SEGV_MAPERR;
		info.si_addr = (void __user *) location;
		do_trap(interruption_code, signal,
			"user address fault", regs, &info);
	} else
#endif
	if (signal) {
		info.si_signo = signal;
		info.si_errno = 0;
		info.si_code = ILL_ILLOPC;
		info.si_addr = (void __user *) location;
		do_trap(interruption_code, signal,
			"illegal operation", regs, &info);
	}
}
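
/*
 * With CONFIG_MATHEMU a specification exception from user space may be
 * a floating point instruction that has to be emulated; without it a
 * plain SIGILL handler is generated by DO_ERROR_INFO below.
 */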
#ifdef CONFIG_MATHEMU
asmlinkage void
specification_exception(struct pt_regs * regs, long interruption_code)
{
	__u8 opcode[6];
	__u16 __user *location = NULL;
	int signal = 0;

	location = (__u16 __user *) get_check_address(regs);

	/*
	 * We got all needed information from the lowcore and can
	 * now safely switch on interrupts.
	 */
	if (regs->psw.mask & PSW_MASK_PSTATE)
		local_irq_enable();

	if (regs->psw.mask & PSW_MASK_PSTATE) {
		get_user(*((__u16 *) opcode), location);
		switch (opcode[0]) {
		case 0x28: /* LDR Rx,Ry */
			signal = math_emu_ldr(opcode);
			break;
		case 0x38: /* LER Rx,Ry */
			signal = math_emu_ler(opcode);
			break;
		case 0x60: /* STD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_std(opcode, regs);
			break;
		case 0x68: /* LD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ld(opcode, regs);
			break;
		case 0x70: /* STE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ste(opcode, regs);
			break;
		case 0x78: /* LE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_le(opcode, regs);
			break;
		default:
			signal = SIGILL;
			break;
		}
	} else
		signal = SIGILL;

	if (signal == SIGFPE)
		do_fp_trap(regs, location,
			   current->thread.fp_regs.fpc, interruption_code);
	else if (signal) {
		siginfo_t info;

		info.si_signo = signal;
		info.si_errno = 0;
		info.si_code = ILL_ILLOPN;
		info.si_addr = location;
		do_trap(interruption_code, signal,
			"specification exception", regs, &info);
	}
}
#else
DO_ERROR_INFO(SIGILL, "specification exception", specification_exception,
	      ILL_ILLOPN, get_check_address(regs));
#endif
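
/*
 * Data exception handler: on machines with IEEE floating point the DXC
 * field of the FPC register decides whether this is an IEEE exception
 * (SIGFPE); with CONFIG_MATHEMU the instruction may be emulated instead.
 */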
static void data_exception(struct pt_regs * regs, long interruption_code)
{
	__u16 __user *location;
	int signal = 0;

	location = get_check_address(regs);

	/*
	 * We got all needed information from the lowcore and can
	 * now safely switch on interrupts.
	 */
	if (regs->psw.mask & PSW_MASK_PSTATE)
		local_irq_enable();

	if (MACHINE_HAS_IEEE)
		asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));
#ifdef CONFIG_MATHEMU
	else if (regs->psw.mask & PSW_MASK_PSTATE) {
		__u8 opcode[6];

		get_user(*((__u16 *) opcode), location);
		switch (opcode[0]) {
		case 0x28: /* LDR Rx,Ry */
			signal = math_emu_ldr(opcode);
			break;
		case 0x38: /* LER Rx,Ry */
			signal = math_emu_ler(opcode);
			break;
		case 0x60: /* STD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_std(opcode, regs);
			break;
		case 0x68: /* LD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ld(opcode, regs);
			break;
		case 0x70: /* STE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ste(opcode, regs);
			break;
		case 0x78: /* LE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_le(opcode, regs);
			break;
		case 0xb3:
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_b3(opcode, regs);
			break;
		case 0xed:
			get_user(*((__u32 *) (opcode+2)),
				 (__u32 __user *)(location+1));
			signal = math_emu_ed(opcode, regs);
			break;
		case 0xb2:
			if (opcode[1] == 0x99) {
				get_user(*((__u16 *) (opcode+2)), location+1);
				signal = math_emu_srnm(opcode, regs);
			} else if (opcode[1] == 0x9c) {
				get_user(*((__u16 *) (opcode+2)), location+1);
				signal = math_emu_stfpc(opcode, regs);
			} else if (opcode[1] == 0x9d) {
				get_user(*((__u16 *) (opcode+2)), location+1);
				signal = math_emu_lfpc(opcode, regs);
			} else
				signal = SIGILL;
			break;
		default:
			signal = SIGILL;
			break;
		}
	}
#endif
	if (current->thread.fp_regs.fpc & FPC_DXC_MASK)
		signal = SIGFPE;
	else
		signal = SIGILL;

	if (signal == SIGFPE)
		do_fp_trap(regs, location,
			   current->thread.fp_regs.fpc, interruption_code);
	else if (signal) {
		siginfo_t info;

		info.si_signo = signal;
		info.si_errno = 0;
		info.si_code = ILL_ILLOPN;
		info.si_addr = location;
		do_trap(interruption_code, signal,
			"data exception", regs, &info);
	}
}

static void space_switch_exception(struct pt_regs * regs, long int_code)
{
	siginfo_t info;

	/* Set user psw back to home space mode. */
	if (regs->psw.mask & PSW_MASK_PSTATE)
		regs->psw.mask |= PSW_ASC_HOME;
	/* Send SIGILL. */
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_PRVOPC;
	info.si_addr = get_check_address(regs);
	do_trap(int_code, SIGILL, "space switch event", regs, &info);
}

asmlinkage void kernel_stack_overflow(struct pt_regs * regs)
{
	bust_spinlocks(1);
	printk("Kernel stack overflow.\n");
	show_regs(regs);
	bust_spinlocks(0);
	panic("Corrupt kernel stack, can't continue.");
}

/* init is done in lowcore.S and head.S */

void __init trap_init(void)
{
	int i;

	for (i = 0; i < 128; i++)
		pgm_check_table[i] = &default_trap_handler;
	pgm_check_table[1] = &illegal_op;
	pgm_check_table[2] = &privileged_op;
	pgm_check_table[3] = &execute_exception;
	pgm_check_table[4] = &do_protection_exception;
	pgm_check_table[5] = &addressing_exception;
	pgm_check_table[6] = &specification_exception;
	pgm_check_table[7] = &data_exception;
	pgm_check_table[8] = &overflow_exception;
	pgm_check_table[9] = &divide_exception;
	pgm_check_table[0x0A] = &overflow_exception;
	pgm_check_table[0x0B] = &divide_exception;
	pgm_check_table[0x0C] = &hfp_overflow_exception;
	pgm_check_table[0x0D] = &hfp_underflow_exception;
	pgm_check_table[0x0E] = &hfp_significance_exception;
	pgm_check_table[0x0F] = &hfp_divide_exception;
	pgm_check_table[0x10] = &do_dat_exception;
	pgm_check_table[0x11] = &do_dat_exception;
	pgm_check_table[0x12] = &translation_exception;
	pgm_check_table[0x13] = &special_op_exception;
#ifdef CONFIG_64BIT
	pgm_check_table[0x38] = &do_dat_exception;
	pgm_check_table[0x39] = &do_dat_exception;
	pgm_check_table[0x3A] = &do_dat_exception;
	pgm_check_table[0x3B] = &do_dat_exception;
#endif /* CONFIG_64BIT */
	pgm_check_table[0x15] = &operand_exception;
	pgm_check_table[0x1C] = &space_switch_exception;
	pgm_check_table[0x1D] = &hfp_sqrt_exception;
	pgm_check_table[0x40] = &do_monitor_call;
	pfault_irq_init();
}