/*
 *  linux/arch/parisc/traps.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 1999, 2000  Philipp Rumpf <prumpf@tux.org>
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/console.h>
#include <linux/kallsyms.h>
#include <linux/bug.h>

#include <asm/assembly.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/unaligned.h>
#include <asm/atomic.h>
#include <asm/smp.h>
#include <asm/pdc.h>
#include <asm/pdc_chassis.h>
#include <asm/unwind.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

#include "../math-emu/math-emu.h"       /* for handle_fpe() */

#define PRINT_USER_FAULTS /* (turn this on if you want user faults to be
                           *  dumped to the console via printk) */

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
DEFINE_SPINLOCK(pa_dbit_lock);
#endif
static int printbinary(char *buf, unsigned long x, int nbits)
{
        unsigned long mask = 1UL << (nbits - 1);
        while (mask != 0) {
                *buf++ = (mask & x ? '1' : '0');
                mask >>= 1;
        }
        *buf = '\0';

        return nbits;
}
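/*
 * Illustrative note (added): with x = 0xA5 and nbits = 8, printbinary()
 * writes "10100101" into buf and returns 8.  The caller must supply at
 * least nbits + 1 bytes of buffer for the terminating NUL.
 */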

#ifdef CONFIG_64BIT
#define RFMT "%016lx"
#else
#define RFMT "%08lx"
#endif
#define FFMT "%016llx"  /* fpregs are 64-bit always */

#define PRINTREGS(lvl,r,f,fmt,x)        \
        printk("%s%s%02d-%02d " fmt " " fmt " " fmt " " fmt "\n",      \
                lvl, f, (x), (x+3), (r)[(x)+0], (r)[(x)+1],            \
                (r)[(x)+2], (r)[(x)+3])
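/*
 * Illustrative note (added): PRINTREGS(level, regs->gr, "r", RFMT, 0)
 * emits one line of the form "r00-03 <gr0> <gr1> <gr2> <gr3>", so the
 * loops below dump the 32 general and FP registers four to a line.
 */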

static void print_gr(char *level, struct pt_regs *regs)
{
        int i;
        char buf[64];

        printk("%s\n", level);
        printk("%s YZrvWESTHLNXBCVMcbcbcbcbOGFRQPDI\n", level);
        printbinary(buf, regs->gr[0], 32);
        printk("%sPSW: %s %s\n", level, buf, print_tainted());

        for (i = 0; i < 32; i += 4)
                PRINTREGS(level, regs->gr, "r", RFMT, i);
}

static void print_fr(char *level, struct pt_regs *regs)
{
        int i;
        char buf[64];
        struct { u32 sw[2]; } s;

        /* FR are 64bit everywhere. Need to use asm to get the content
         * of fpsr/fper1, and we assume that we won't have a FP Identify
         * in our way, otherwise we're screwed.
         * The fldd is used to restore the T-bit if there was one, as the
         * store clears it anyway.
         * PA2.0 book says "thou shall not use fstw on FPSR/FPERs" - T-Bone */
        asm volatile ("fstd %%fr0,0(%1) \n\t"
                      "fldd 0(%1),%%fr0 \n\t"
                      : "=m" (s) : "r" (&s) : "r0");

        printk("%s\n", level);
        printk("%s VZOUICununcqcqcqcqcqcrmunTDVZOUI\n", level);
        printbinary(buf, s.sw[0], 32);
        printk("%sFPSR: %s\n", level, buf);
        printk("%sFPER1: %08x\n", level, s.sw[1]);

        /* here we'll print fr0 again, tho it'll be meaningless */
        for (i = 0; i < 32; i += 4)
                PRINTREGS(level, regs->fr, "fr", FFMT, i);
}

void show_regs(struct pt_regs *regs)
{
        int i;
        char *level;
        unsigned long cr30, cr31;

        level = user_mode(regs) ? KERN_DEBUG : KERN_CRIT;

        print_gr(level, regs);

        for (i = 0; i < 8; i += 4)
                PRINTREGS(level, regs->sr, "sr", RFMT, i);

        if (user_mode(regs))
                print_fr(level, regs);

        cr30 = mfctl(30);
        cr31 = mfctl(31);
        printk("%s\n", level);
        printk("%sIASQ: " RFMT " " RFMT " IAOQ: " RFMT " " RFMT "\n",
               level, regs->iasq[0], regs->iasq[1], regs->iaoq[0], regs->iaoq[1]);
        printk("%s IIR: %08lx ISR: " RFMT " IOR: " RFMT "\n",
               level, regs->iir, regs->isr, regs->ior);
        printk("%s CPU: %8d CR30: " RFMT " CR31: " RFMT "\n",
               level, current_thread_info()->cpu, cr30, cr31);
        printk("%s ORIG_R28: " RFMT "\n", level, regs->orig_r28);
        printk(level);
        print_symbol(" IAOQ[0]: %s\n", regs->iaoq[0]);
        printk(level);
        print_symbol(" IAOQ[1]: %s\n", regs->iaoq[1]);
        printk(level);
        print_symbol(" RP(r2): %s\n", regs->gr[2]);
}


void dump_stack(void)
{
        show_stack(NULL, NULL);
}

EXPORT_SYMBOL(dump_stack);

static void do_show_stack(struct unwind_frame_info *info)
{
        int i = 1;

        printk(KERN_CRIT "Backtrace:\n");
        while (i <= 16) {
                if (unwind_once(info) < 0 || info->ip == 0)
                        break;

                if (__kernel_text_address(info->ip)) {
                        printk("%s [<" RFMT ">] ", (i&0x3)==1 ? KERN_CRIT : "", info->ip);
#ifdef CONFIG_KALLSYMS
                        print_symbol("%s\n", info->ip);
#else
                        if ((i & 0x03) == 0)
                                printk("\n");
#endif
                        i++;
                }
        }
        printk("\n");
}

void show_stack(struct task_struct *task, unsigned long *s)
{
        struct unwind_frame_info info;
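
        /*
         * Explanatory note (added): when no task is given we unwind the
         * current context.  A throw-away pt_regs is filled in with just
         * enough state for the unwinder to start from right here: the
         * address of the HERE label as the instruction offset, our return
         * address as r2 (rp), and the live stack pointer read from %r30.
         */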
        if (!task) {
                unsigned long sp;

HERE:
                asm volatile ("copy %%r30, %0" : "=r"(sp));
                {
                        struct pt_regs r;

                        memset(&r, 0, sizeof(struct pt_regs));
                        r.iaoq[0] = (unsigned long)&&HERE;
                        r.gr[2] = (unsigned long)__builtin_return_address(0);
                        r.gr[30] = sp;

                        unwind_frame_init(&info, current, &r);
                }
        } else {
                unwind_frame_init_from_blocked_task(&info, task);
        }

        do_show_stack(&info);
}

int is_valid_bugaddr(unsigned long iaoq)
{
        return 1;
}
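/*
 * Note (added): the generic BUG machinery uses is_valid_bugaddr() as a
 * sanity check before searching the __bug_table; here every address is
 * accepted and report_bug() in handle_break() does the real lookup.
 */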

void die_if_kernel(char *str, struct pt_regs *regs, long err)
{
        if (user_mode(regs)) {
                if (err == 0)
                        return; /* STFU */

                printk(KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
                        current->comm, task_pid_nr(current), str, err, regs->iaoq[0]);
#ifdef PRINT_USER_FAULTS
                /* XXX for debugging only */
                show_regs(regs);
#endif
                return;
        }

        oops_in_progress = 1;

        /* Amuse the user in a SPARC fashion */
        if (err) printk(
KERN_CRIT "      _______________________________ \n"
KERN_CRIT "     < Your System ate a SPARC! Gah! >\n"
KERN_CRIT "      ------------------------------- \n"
KERN_CRIT "             \\   ^__^\n"
KERN_CRIT "              \\  (xx)\\_______\n"
KERN_CRIT "                 (__)\\       )\\/\\\n"
KERN_CRIT "                  U  ||----w |\n"
KERN_CRIT "                     ||     ||\n");

        /* unlock the pdc lock if necessary */
        pdc_emergency_unlock();

        /* maybe the kernel hasn't booted very far yet and hasn't been able
         * to initialize the serial or STI console. In that case we should
         * re-enable the pdc console, so that the user will be able to
         * identify the problem. */
        if (!console_drivers)
                pdc_console_restart();

        if (err)
                printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n",
                        current->comm, task_pid_nr(current), str, err);

        /* Wot's wrong wif bein' racy? */
        if (current->thread.flags & PARISC_KERNEL_DEATH) {
                printk(KERN_CRIT "%s() recursion detected.\n", __FUNCTION__);
                local_irq_enable();
                while (1);
        }
        current->thread.flags |= PARISC_KERNEL_DEATH;

        show_regs(regs);
        dump_stack();
        add_taint(TAINT_DIE);

        if (in_interrupt())
                panic("Fatal exception in interrupt");

        if (panic_on_oops) {
                printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
                ssleep(5);
                panic("Fatal exception");
        }

        do_exit(SIGSEGV);
}

int syscall_ipi(int (*syscall) (struct pt_regs *), struct pt_regs *regs)
{
        return syscall(regs);
}


/* gdb uses break 4,8 */
#define GDB_BREAK_INSN 0x10004
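/*
 * Note (added): in "break im5,im13" the 5-bit immediate sits in the low
 * five bits of the instruction word and the 13-bit immediate starts at
 * bit 13, which is exactly how handle_break() decodes iir below.  So
 * gdb's "break 4,8" encodes as (8 << 13) | 4 = 0x10004.
 */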

static void handle_gdb_break(struct pt_regs *regs, int wot)
{
        struct siginfo si;

        si.si_signo = SIGTRAP;
        si.si_errno = 0;
        si.si_code = wot;
        si.si_addr = (void __user *) (regs->iaoq[0] & ~3);
        force_sig_info(SIGTRAP, &si, current);
}

static void handle_break(struct pt_regs *regs)
{
        unsigned iir = regs->iir;

        if (unlikely(iir == PARISC_BUG_BREAK_INSN && !user_mode(regs))) {
                /* check if a BUG() or WARN() trapped here. */
                enum bug_trap_type tt;
                tt = report_bug(regs->iaoq[0] & ~3, regs);
                if (tt == BUG_TRAP_TYPE_WARN) {
                        regs->iaoq[0] += 4;
                        regs->iaoq[1] += 4;
                        return; /* return to next instruction when WARN_ON(). */
                }
                die_if_kernel("Unknown kernel breakpoint", regs,
                        (tt == BUG_TRAP_TYPE_NONE) ? 9 : 0);
        }

#ifdef PRINT_USER_FAULTS
        if (unlikely(iir != GDB_BREAK_INSN)) {
                printk(KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
                        iir & 31, (iir>>13) & ((1<<13)-1),
                        task_pid_nr(current), current->comm);
                show_regs(regs);
        }
#endif

        /* send standard GDB signal */
        handle_gdb_break(regs, TRAP_BRKPT);
}

static void default_trap(int code, struct pt_regs *regs)
{
        printk(KERN_ERR "Trap %d on CPU %d\n", code, smp_processor_id());
        show_regs(regs);
}

void (*cpu_lpmc) (int code, struct pt_regs *regs) __read_mostly = default_trap;
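
/*
 * Note (added): the PIM (Processor Internal Memory) area holds the register
 * state that firmware saved at the time of an HPMC/LPMC; the routine below
 * copies that saved state into a pt_regs so the normal dump/terminate paths
 * can report it.  The layout differs between PA1.1 (narrow) and PA2.0
 * (wide) machines, hence the two branches.
 */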

void transfer_pim_to_trap_frame(struct pt_regs *regs)
{
        register int i;
        extern unsigned int hpmc_pim_data[];
        struct pdc_hpmc_pim_11 *pim_narrow;
        struct pdc_hpmc_pim_20 *pim_wide;

        if (boot_cpu_data.cpu_type >= pcxu) {

                pim_wide = (struct pdc_hpmc_pim_20 *)hpmc_pim_data;

                /*
                 * Note: The following code will probably generate a
                 * bunch of truncation error warnings from the compiler.
                 * Could be handled with an ifdef, but perhaps there
                 * is a better way.
                 */
                regs->gr[0] = pim_wide->cr[22];

                for (i = 1; i < 32; i++)
                        regs->gr[i] = pim_wide->gr[i];

                for (i = 0; i < 32; i++)
                        regs->fr[i] = pim_wide->fr[i];

                for (i = 0; i < 8; i++)
                        regs->sr[i] = pim_wide->sr[i];

                regs->iasq[0] = pim_wide->cr[17];
                regs->iasq[1] = pim_wide->iasq_back;
                regs->iaoq[0] = pim_wide->cr[18];
                regs->iaoq[1] = pim_wide->iaoq_back;

                regs->sar = pim_wide->cr[11];
                regs->iir = pim_wide->cr[19];
                regs->isr = pim_wide->cr[20];
                regs->ior = pim_wide->cr[21];
        }
        else {
                pim_narrow = (struct pdc_hpmc_pim_11 *)hpmc_pim_data;

                regs->gr[0] = pim_narrow->cr[22];

                for (i = 1; i < 32; i++)
                        regs->gr[i] = pim_narrow->gr[i];

                for (i = 0; i < 32; i++)
                        regs->fr[i] = pim_narrow->fr[i];

                for (i = 0; i < 8; i++)
                        regs->sr[i] = pim_narrow->sr[i];

                regs->iasq[0] = pim_narrow->cr[17];
                regs->iasq[1] = pim_narrow->iasq_back;
                regs->iaoq[0] = pim_narrow->cr[18];
                regs->iaoq[1] = pim_narrow->iaoq_back;

                regs->sar = pim_narrow->cr[11];
                regs->iir = pim_narrow->cr[19];
                regs->isr = pim_narrow->cr[20];
                regs->ior = pim_narrow->cr[21];
        }

        /*
         * The following fields only have meaning if we came through
         * another path. So just zero them here.
         */
        regs->ksp = 0;
        regs->kpc = 0;
        regs->orig_r28 = 0;
}

/*
 * This routine is called as a last resort when everything else
 * has gone clearly wrong. We get called for faults in kernel space,
 * and HPMC's.
 */
void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long offset)
{
        static DEFINE_SPINLOCK(terminate_lock);

        oops_in_progress = 1;

        set_eiem(0);
        local_irq_disable();
        spin_lock(&terminate_lock);

        /* unlock the pdc lock if necessary */
        pdc_emergency_unlock();

        /* restart pdc console if necessary */
        if (!console_drivers)
                pdc_console_restart();

        /* Not all paths will gutter the processor... */
        switch(code){
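
        /* Note (added): code 1 means we got here from the HPMC path in
         * handle_interruption(), where the trap frame is meaningless and
         * the real state lives in the firmware-saved PIM dump. */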
        case 1:
                transfer_pim_to_trap_frame(regs);
                break;

        default:
                /* Fall through */
                break;
        }

        {
                /* show_stack(NULL, (unsigned long *)regs->gr[30]); */
                struct unwind_frame_info info;
                unwind_frame_init(&info, current, regs);
                do_show_stack(&info);
        }

        printk("\n");
        printk(KERN_CRIT "%s: Code=%d regs=%p (Addr=" RFMT ")\n",
                msg, code, regs, offset);
        show_regs(regs);

        spin_unlock(&terminate_lock);

        /* put soft power button back under hardware control;
         * if the user had pressed it once at any time, the
         * system will shut down immediately right here. */
        pdc_soft_power_button(0);

        /* Call kernel panic() so reboot timeouts work properly
         * FIXME: This function should be on the list of
         * panic notifiers, and we should call panic
         * directly from the location that we wish.
         * e.g. We should not call panic from
         * parisc_terminate, but rather the other way around.
         * This hack works, prints the panic message twice,
         * and it enables reboot timers!
         */
        panic(msg);
}

void handle_interruption(int code, struct pt_regs *regs)
{
        unsigned long fault_address = 0;
        unsigned long fault_space = 0;
        struct siginfo si;

        if (code == 1)
                pdc_console_restart();  /* switch back to pdc if HPMC */
        else
                local_irq_enable();

        /* Security check:
         * If the priority level is still user, and the
         * faulting space is not equal to the active space
         * then the user is attempting something in a space
         * that does not belong to them. Kill the process.
         *
         * This is normally the situation when the user
         * attempts to jump into the kernel space at the
         * wrong offset, be it at the gateway page or a
         * random location.
         *
         * We cannot normally signal the process because it
         * could *be* on the gateway page, and processes
         * executing on the gateway page can't have signals
         * delivered.
         *
         * We merely readjust the address into the user's
         * space, at a destination address of zero, and
         * allow processing to continue.
         */
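        /* Note (added): on PA-RISC the low two bits of an IAOQ entry hold
         * the privilege level of the queued instruction (3 = user), which
         * is why "iaoq[0] & 3" below is a test for user privilege. */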
        if (((unsigned long)regs->iaoq[0] & 3) &&
            ((unsigned long)regs->iasq[0] != (unsigned long)regs->sr[7])) {
                /* Kill the user process later */
                regs->iaoq[0] = 0 | 3;
                regs->iaoq[1] = regs->iaoq[0] + 4;
                regs->iasq[0] = regs->iasq[1] = regs->sr[7];
                regs->gr[0] &= ~PSW_B;
                return;
        }

#if 0
        printk(KERN_CRIT "Interruption # %d\n", code);
#endif

        switch(code) {

        case 1:
                /* High-priority machine check (HPMC) */

                /* set up a new led state on systems shipped with a LED State panel */
                pdc_chassis_send_status(PDC_CHASSIS_DIRECT_HPMC);

                parisc_terminate("High Priority Machine Check (HPMC)",
                                regs, code, 0);
                /* NOT REACHED */

        case 2:
                /* Power failure interrupt */
                printk(KERN_CRIT "Power failure interrupt !\n");
                return;

        case 3:
                /* Recovery counter trap */
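                /* Note (added): the recovery counter (PSW R bit) is what
                 * drives single-stepping here; clearing PSW_R stops the
                 * per-instruction traps before TRAP_TRACE is reported to
                 * the debugger. */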
                regs->gr[0] &= ~PSW_R;
                if (user_space(regs))
                        handle_gdb_break(regs, TRAP_TRACE);
                /* else this must be the start of a syscall - just let it run */
                return;

        case 5:
                /* Low-priority machine check */
                pdc_chassis_send_status(PDC_CHASSIS_DIRECT_LPMC);

                flush_cache_all();
                flush_tlb_all();
                cpu_lpmc(5, regs);
                return;

        case 6:
                /* Instruction TLB miss fault/Instruction page fault */
                fault_address = regs->iaoq[0];
                fault_space   = regs->iasq[0];
                break;

        case 8:
                /* Illegal instruction trap */
                die_if_kernel("Illegal instruction", regs, code);
                si.si_code = ILL_ILLOPC;
                goto give_sigill;

        case 9:
                /* Break instruction trap */
                handle_break(regs);
                return;

        case 10:
                /* Privileged operation trap */
                die_if_kernel("Privileged operation", regs, code);
                si.si_code = ILL_PRVOPC;
                goto give_sigill;

        case 11:
                /* Privileged register trap */
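                /* Note (added): 0x034008a0 under the mask 0xffdfffe0 matches
                 * "mfctl cr26,rX" / "mfctl cr27,rX"; bit 0x00200000 selects
                 * cr27 vs cr26 and the low five bits name the target GR, as
                 * used in the emulation below. */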
                if ((regs->iir & 0xffdfffe0) == 0x034008a0) {
                        /* This is a MFCTL cr26/cr27 to gr instruction.
                         * PCXS traps on this, so we need to emulate it.
                         */
                        if (regs->iir & 0x00200000)
                                regs->gr[regs->iir & 0x1f] = mfctl(27);
                        else
                                regs->gr[regs->iir & 0x1f] = mfctl(26);

                        regs->iaoq[0] = regs->iaoq[1];
                        regs->iaoq[1] += 4;
                        regs->iasq[0] = regs->iasq[1];
                        return;
                }
                die_if_kernel("Privileged register usage", regs, code);
                si.si_code = ILL_PRVREG;
        give_sigill:
                si.si_signo = SIGILL;
                si.si_errno = 0;
                si.si_addr = (void __user *) regs->iaoq[0];
                force_sig_info(SIGILL, &si, current);
                return;

        case 12:
                /* Overflow Trap, let the userland signal handler do the cleanup */
                si.si_signo = SIGFPE;
                si.si_code = FPE_INTOVF;
                si.si_addr = (void __user *) regs->iaoq[0];
                force_sig_info(SIGFPE, &si, current);
                return;

        case 13:
                /* Conditional Trap
                   The condition succeeds in an instruction which traps
                   on condition */
                if (user_mode(regs)) {
                        si.si_signo = SIGFPE;
                        /* Set to zero, and let the userspace app figure it out from
                           the insn pointed to by si_addr */
                        si.si_code = 0;
                        si.si_addr = (void __user *) regs->iaoq[0];
                        force_sig_info(SIGFPE, &si, current);
                        return;
                }
                /* The kernel doesn't want to handle condition codes */
                break;

        case 14:
                /* Assist Exception Trap, i.e. floating point exception. */
                die_if_kernel("Floating point exception", regs, 0); /* quiet */
                handle_fpe(regs);
                return;

        case 15:
                /* Data TLB miss fault/Data page fault */
                /* Fall through */
        case 16:
                /* Non-access instruction TLB miss fault */
                /* The instruction TLB entry needed for the target address of the FIC
                   is absent, and hardware can't find it, so we get to cleanup */
                /* Fall through */
        case 17:
                /* Non-access data TLB miss fault/Non-access data page fault */
                /* FIXME:
                   Still need to add slow path emulation code here!
                   If the insn used a non-shadow register, then the tlb
                   handlers could not have their side-effect (e.g. probe
                   writing to a target register) emulated since rfir would
                   erase the changes to said register. Instead we have to
                   setup everything, call this function we are in, and emulate
                   by hand. Technically we need to emulate:
                   fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw
                 */
                fault_address = regs->ior;
                fault_space = regs->isr;
                break;

        case 18:
                /* PCXS only -- later CPUs split this into types 26, 27 & 28 */
                /* Check for unaligned access */
                if (check_unaligned(regs)) {
                        handle_unaligned(regs);
                        return;
                }
                /* Fall Through */
        case 26:
                /* PCXL: Data memory access rights trap */
                fault_address = regs->ior;
                fault_space   = regs->isr;
                break;

        case 19:
                /* Data memory break trap */
                regs->gr[0] |= PSW_X; /* So we can single-step over the trap */
                /* fall thru */
        case 21:
                /* Page reference trap */
                handle_gdb_break(regs, TRAP_HWBKPT);
                return;

        case 25:
                /* Taken branch trap */
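                /* Note (added): with the PSW T bit set the CPU traps on
                 * every taken branch, which debuggers use for branch
                 * tracing; clear it before reporting TRAP_BRANCH. */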
                regs->gr[0] &= ~PSW_T;
                if (user_space(regs))
                        handle_gdb_break(regs, TRAP_BRANCH);
                /* else this must be the start of a syscall - just let it
                 * run.
                 */
                return;

        case 7:
                /* Instruction access rights */
                /* PCXL: Instruction memory protection trap */
                /*
                 * This could be caused by either: 1) a process attempting
                 * to execute within a vma that does not have execute
                 * permission, or 2) an access rights violation caused by a
                 * flush only translation set up by ptep_get_and_clear().
                 * So we check the vma permissions to differentiate the two.
                 * If the vma indicates we have execute permission, then
                 * the cause is the latter one. In this case, we need to
                 * call do_page_fault() to fix the problem.
                 */
                if (user_mode(regs)) {
                        struct vm_area_struct *vma;

                        down_read(&current->mm->mmap_sem);
                        vma = find_vma(current->mm, regs->iaoq[0]);
                        if (vma && (regs->iaoq[0] >= vma->vm_start)
                                && (vma->vm_flags & VM_EXEC)) {
                                fault_address = regs->iaoq[0];
                                fault_space = regs->iasq[0];

                                up_read(&current->mm->mmap_sem);
                                break; /* call do_page_fault() */
                        }
                        up_read(&current->mm->mmap_sem);
                }
                /* Fall Through */
        case 27:
                /* Data memory protection ID trap */
                die_if_kernel("Protection id trap", regs, code);
                si.si_code = SEGV_MAPERR;
                si.si_signo = SIGSEGV;
                si.si_errno = 0;
                if (code == 7)
                        si.si_addr = (void __user *) regs->iaoq[0];
                else
                        si.si_addr = (void __user *) regs->ior;
                force_sig_info(SIGSEGV, &si, current);
                return;

        case 28:
                /* Unaligned data reference trap */
                handle_unaligned(regs);
                return;

        default:
                if (user_mode(regs)) {
#ifdef PRINT_USER_FAULTS
                        printk(KERN_DEBUG "\nhandle_interruption() pid=%d command='%s'\n",
                                task_pid_nr(current), current->comm);
                        show_regs(regs);
#endif
                        /* SIGBUS, for lack of a better one. */
                        si.si_signo = SIGBUS;
                        si.si_code = BUS_OBJERR;
                        si.si_errno = 0;
                        si.si_addr = (void __user *) regs->ior;
                        force_sig_info(SIGBUS, &si, current);
                        return;
                }
                pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);

                parisc_terminate("Unexpected interruption", regs, code, 0);
                /* NOT REACHED */
        }

        if (user_mode(regs)) {
                if ((fault_space >> SPACEID_SHIFT) != (regs->sr[7] >> SPACEID_SHIFT)) {
#ifdef PRINT_USER_FAULTS
                        if (fault_space == 0)
                                printk(KERN_DEBUG "User Fault on Kernel Space ");
                        else
                                printk(KERN_DEBUG "User Fault (long pointer) (fault %d) ",
                                       code);
                        printk("pid=%d command='%s'\n", task_pid_nr(current), current->comm);
                        show_regs(regs);
#endif
                        si.si_signo = SIGSEGV;
                        si.si_errno = 0;
                        si.si_code = SEGV_MAPERR;
                        si.si_addr = (void __user *) regs->ior;
                        force_sig_info(SIGSEGV, &si, current);
                        return;
                }
        }
        else {
                /*
                 * The kernel should never fault on its own address space.
                 */
                if (fault_space == 0)
                {
                        pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
                        parisc_terminate("Kernel Fault", regs, code, fault_address);
                }
        }

        do_page_fault(regs, code, fault_address);
}

int __init check_ivt(void *iva)
{
        extern const u32 os_hpmc[];
        extern const u32 os_hpmc_end[];

        int i;
        u32 check = 0;
        u32 *ivap;
        u32 *hpmcp;
        u32 length;

        if (strcmp((char *)iva, "cows can fly"))
                return -1;

        ivap = (u32 *)iva;

        for (i = 0; i < 8; i++)
                *ivap++ = 0;

        /* Compute Checksum for HPMC handler */
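        /* Note (added): the zeroing loop above advanced ivap past the first
         * eight words, so ivap[] now indexes the next 32-byte vector entry,
         * the one used for the HPMC handler.  Word 7 of that entry gets the
         * handler length and word 5 the negated sum of the handler words
         * plus the entry itself, so the checksum over the OS HPMC handler
         * works out to zero (assuming the checksum slot started out zero),
         * which is presumably what firmware verifies before using it. */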
        length = os_hpmc_end - os_hpmc;
        ivap[7] = length;

        hpmcp = (u32 *)os_hpmc;

        for (i = 0; i < length/4; i++)
                check += *hpmcp++;

        for (i = 0; i < 8; i++)
                check += ivap[i];

        ivap[5] = -check;

        return 0;
}

#ifndef CONFIG_64BIT
extern const void fault_vector_11;
#endif
extern const void fault_vector_20;

void __init trap_init(void)
{
        void *iva;

        if (boot_cpu_data.cpu_type >= pcxu)
                iva = (void *) &fault_vector_20;
        else
#ifdef CONFIG_64BIT
                panic("Can't boot 64-bit OS on PA1.1 processor!");
#else
                iva = (void *) &fault_vector_11;
#endif

        if (check_ivt(iva))
                panic("IVT invalid");
}