|
@@ -8,8 +8,8 @@
|
|
|
* Copyright (C) 1998 Ulf Carlsson
|
|
|
* Copyright (C) 1999 Silicon Graphics, Inc.
|
|
|
* Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
|
|
|
- * Copyright (C) 2000, 01 MIPS Technologies, Inc.
|
|
|
* Copyright (C) 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki
|
|
|
+ * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc. All rights reserved.
|
|
|
*/
|
|
|
#include <linux/bug.h>
|
|
|
#include <linux/compiler.h>
|
|
@@ -83,10 +83,6 @@ extern asmlinkage void handle_dsp(void);
|
|
|
extern asmlinkage void handle_mcheck(void);
|
|
|
extern asmlinkage void handle_reserved(void);
|
|
|
|
|
|
-extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
|
|
|
- struct mips_fpu_struct *ctx, int has_fpu,
|
|
|
- void *__user *fault_addr);
|
|
|
-
|
|
|
void (*board_be_init)(void);
|
|
|
int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
|
|
|
void (*board_nmi_handler_setup)(void);
|
|
@@ -495,6 +491,12 @@ asmlinkage void do_be(struct pt_regs *regs)
|
|
|
#define SYNC 0x0000000f
|
|
|
#define RDHWR 0x0000003b
|
|
|
|
|
|
+/* microMIPS definitions */
|
|
|
+#define MM_POOL32A_FUNC 0xfc00ffff
|
|
|
+#define MM_RDHWR 0x00006b3c
|
|
|
+#define MM_RS 0x001f0000
|
|
|
+#define MM_RT 0x03e00000
|
|
|
+
|
|
|
/*
|
|
|
* The ll_bit is cleared by r*_switch.S
|
|
|
*/
|
|
@@ -609,42 +611,62 @@ static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
|
|
|
* Simulate trapping 'rdhwr' instructions to provide user accessible
|
|
|
* registers not implemented in hardware.
|
|
|
*/
|
|
|
-static int simulate_rdhwr(struct pt_regs *regs, unsigned int opcode)
|
|
|
+static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
|
|
|
{
|
|
|
struct thread_info *ti = task_thread_info(current);
|
|
|
|
|
|
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
|
|
|
+ 1, regs, 0);
|
|
|
+ switch (rd) {
|
|
|
+ case 0: /* CPU number */
|
|
|
+ regs->regs[rt] = smp_processor_id();
|
|
|
+ return 0;
|
|
|
+ case 1: /* SYNCI length */
|
|
|
+ regs->regs[rt] = min(current_cpu_data.dcache.linesz,
|
|
|
+ current_cpu_data.icache.linesz);
|
|
|
+ return 0;
|
|
|
+ case 2: /* Read count register */
|
|
|
+ regs->regs[rt] = read_c0_count();
|
|
|
+ return 0;
|
|
|
+ case 3: /* Count register resolution */
|
|
|
+ switch (current_cpu_data.cputype) {
|
|
|
+ case CPU_20KC:
|
|
|
+ case CPU_25KF:
|
|
|
+ regs->regs[rt] = 1;
|
|
|
+ break;
|
|
|
+ default:
|
|
|
+ regs->regs[rt] = 2;
|
|
|
+ }
|
|
|
+ return 0;
|
|
|
+ case 29:
|
|
|
+ regs->regs[rt] = ti->tp_value;
|
|
|
+ return 0;
|
|
|
+ default:
|
|
|
+ return -1;
|
|
|
+ }
|
|
|
+}
|
|
|
+
|
|
|
+static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode)
|
|
|
+{
|
|
|
if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
|
|
|
int rd = (opcode & RD) >> 11;
|
|
|
int rt = (opcode & RT) >> 16;
|
|
|
- perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
|
|
|
- 1, regs, 0);
|
|
|
- switch (rd) {
|
|
|
- case 0: /* CPU number */
|
|
|
- regs->regs[rt] = smp_processor_id();
|
|
|
- return 0;
|
|
|
- case 1: /* SYNCI length */
|
|
|
- regs->regs[rt] = min(current_cpu_data.dcache.linesz,
|
|
|
- current_cpu_data.icache.linesz);
|
|
|
- return 0;
|
|
|
- case 2: /* Read count register */
|
|
|
- regs->regs[rt] = read_c0_count();
|
|
|
- return 0;
|
|
|
- case 3: /* Count register resolution */
|
|
|
- switch (current_cpu_data.cputype) {
|
|
|
- case CPU_20KC:
|
|
|
- case CPU_25KF:
|
|
|
- regs->regs[rt] = 1;
|
|
|
- break;
|
|
|
- default:
|
|
|
- regs->regs[rt] = 2;
|
|
|
- }
|
|
|
- return 0;
|
|
|
- case 29:
|
|
|
- regs->regs[rt] = ti->tp_value;
|
|
|
- return 0;
|
|
|
- default:
|
|
|
- return -1;
|
|
|
- }
|
|
|
+
|
|
|
+ if (simulate_rdhwr(regs, rd, rt) == 0)
|
|
|
+ return 0;
|
|
|
+ }
|
|
|
+
|
|
|
+ /* Not ours. */
|
|
|
+ return -1;
|
|
|
+}
|
|
|
+
|
|
|
+static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned short opcode)
|
|
|
+{
|
|
|
+ if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
|
|
|
+ int rd = (opcode & MM_RS) >> 16;
|
|
|
+ int rt = (opcode & MM_RT) >> 21;
|
|
|
+ if (simulate_rdhwr(regs, rd, rt) == 0)
|
|
|
+ return 0;
|
|
|
}
|
|
|
|
|
|
/* Not ours. */
|
|
@@ -826,9 +848,29 @@ static void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
|
|
|
asmlinkage void do_bp(struct pt_regs *regs)
|
|
|
{
|
|
|
unsigned int opcode, bcode;
|
|
|
-
|
|
|
- if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
|
|
|
- goto out_sigsegv;
|
|
|
+ unsigned long epc;
|
|
|
+ u16 instr[2];
|
|
|
+
|
|
|
+ if (get_isa16_mode(regs->cp0_epc)) {
|
|
|
+ /* Calculate EPC. */
|
|
|
+ epc = exception_epc(regs);
|
|
|
+ if (cpu_has_mmips) {
|
|
|
+ if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc)) ||
|
|
|
+ (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2)))))
|
|
|
+ goto out_sigsegv;
|
|
|
+ opcode = (instr[0] << 16) | instr[1];
|
|
|
+ } else {
|
|
|
+ /* MIPS16e mode */
|
|
|
+ if (__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc)))
|
|
|
+ goto out_sigsegv;
|
|
|
+ bcode = (instr[0] >> 6) & 0x3f;
|
|
|
+ do_trap_or_bp(regs, bcode, "Break");
|
|
|
+ return;
|
|
|
+ }
|
|
|
+ } else {
|
|
|
+ if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
|
|
|
+ goto out_sigsegv;
|
|
|
+ }
|
|
|
|
|
|
/*
|
|
|
* There is the ancient bug in the MIPS assemblers that the break
|
|
@@ -869,13 +911,22 @@ out_sigsegv:
|
|
|
asmlinkage void do_tr(struct pt_regs *regs)
|
|
|
{
|
|
|
unsigned int opcode, tcode = 0;
|
|
|
+ u16 instr[2];
|
|
|
+ unsigned long epc = exception_epc(regs);
|
|
|
|
|
|
- if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
|
|
|
- goto out_sigsegv;
|
|
|
+ if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc))) ||
|
|
|
+ (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2))))
|
|
|
+ goto out_sigsegv;
|
|
|
+ opcode = (instr[0] << 16) | instr[1];
|
|
|
|
|
|
/* Immediate versions don't provide a code. */
|
|
|
- if (!(opcode & OPCODE))
|
|
|
- tcode = ((opcode >> 6) & ((1 << 10) - 1));
|
|
|
+ if (!(opcode & OPCODE)) {
|
|
|
+ if (get_isa16_mode(regs->cp0_epc))
|
|
|
+ /* microMIPS */
|
|
|
+ tcode = (opcode >> 12) & 0x1f;
|
|
|
+ else
|
|
|
+ tcode = ((opcode >> 6) & ((1 << 10) - 1));
|
|
|
+ }
|
|
|
|
|
|
do_trap_or_bp(regs, tcode, "Trap");
|
|
|
return;
|
|
@@ -888,6 +939,7 @@ asmlinkage void do_ri(struct pt_regs *regs)
|
|
|
{
|
|
|
unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
|
|
|
unsigned long old_epc = regs->cp0_epc;
|
|
|
+ unsigned long old31 = regs->regs[31];
|
|
|
unsigned int opcode = 0;
|
|
|
int status = -1;
|
|
|
|
|
@@ -900,23 +952,37 @@ asmlinkage void do_ri(struct pt_regs *regs)
|
|
|
if (unlikely(compute_return_epc(regs) < 0))
|
|
|
return;
|
|
|
|
|
|
- if (unlikely(get_user(opcode, epc) < 0))
|
|
|
- status = SIGSEGV;
|
|
|
+ if (get_isa16_mode(regs->cp0_epc)) {
|
|
|
+ unsigned short mmop[2] = { 0 };
|
|
|
|
|
|
- if (!cpu_has_llsc && status < 0)
|
|
|
- status = simulate_llsc(regs, opcode);
|
|
|
+ if (unlikely(get_user(mmop[0], (u16 __user *)epc) < 0))
|
|
|
+ status = SIGSEGV;
|
|
|
+ if (unlikely(get_user(mmop[1], (u16 __user *)epc + 1) < 0))
|
|
|
+ status = SIGSEGV;
|
|
|
+ opcode = (mmop[0] << 16) | mmop[1];
|
|
|
|
|
|
- if (status < 0)
|
|
|
- status = simulate_rdhwr(regs, opcode);
|
|
|
+ if (status < 0)
|
|
|
+ status = simulate_rdhwr_mm(regs, opcode);
|
|
|
+ } else {
|
|
|
+ if (unlikely(get_user(opcode, epc) < 0))
|
|
|
+ status = SIGSEGV;
|
|
|
|
|
|
- if (status < 0)
|
|
|
- status = simulate_sync(regs, opcode);
|
|
|
+ if (!cpu_has_llsc && status < 0)
|
|
|
+ status = simulate_llsc(regs, opcode);
|
|
|
+
|
|
|
+ if (status < 0)
|
|
|
+ status = simulate_rdhwr_normal(regs, opcode);
|
|
|
+
|
|
|
+ if (status < 0)
|
|
|
+ status = simulate_sync(regs, opcode);
|
|
|
+ }
|
|
|
|
|
|
if (status < 0)
|
|
|
status = SIGILL;
|
|
|
|
|
|
if (unlikely(status > 0)) {
|
|
|
regs->cp0_epc = old_epc; /* Undo skip-over. */
|
|
|
+ regs->regs[31] = old31;
|
|
|
force_sig(status, current);
|
|
|
}
|
|
|
}
|
|
@@ -986,7 +1052,7 @@ static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
|
|
|
asmlinkage void do_cpu(struct pt_regs *regs)
|
|
|
{
|
|
|
unsigned int __user *epc;
|
|
|
- unsigned long old_epc;
|
|
|
+ unsigned long old_epc, old31;
|
|
|
unsigned int opcode;
|
|
|
unsigned int cpid;
|
|
|
int status;
|
|
@@ -1000,26 +1066,41 @@ asmlinkage void do_cpu(struct pt_regs *regs)
|
|
|
case 0:
|
|
|
epc = (unsigned int __user *)exception_epc(regs);
|
|
|
old_epc = regs->cp0_epc;
|
|
|
+ old31 = regs->regs[31];
|
|
|
opcode = 0;
|
|
|
status = -1;
|
|
|
|
|
|
if (unlikely(compute_return_epc(regs) < 0))
|
|
|
return;
|
|
|
|
|
|
- if (unlikely(get_user(opcode, epc) < 0))
|
|
|
- status = SIGSEGV;
|
|
|
+ if (get_isa16_mode(regs->cp0_epc)) {
|
|
|
+ unsigned short mmop[2] = { 0 };
|
|
|
|
|
|
- if (!cpu_has_llsc && status < 0)
|
|
|
- status = simulate_llsc(regs, opcode);
|
|
|
+ if (unlikely(get_user(mmop[0], (u16 __user *)epc) < 0))
|
|
|
+ status = SIGSEGV;
|
|
|
+ if (unlikely(get_user(mmop[1], (u16 __user *)epc + 1) < 0))
|
|
|
+ status = SIGSEGV;
|
|
|
+ opcode = (mmop[0] << 16) | mmop[1];
|
|
|
|
|
|
- if (status < 0)
|
|
|
- status = simulate_rdhwr(regs, opcode);
|
|
|
+ if (status < 0)
|
|
|
+ status = simulate_rdhwr_mm(regs, opcode);
|
|
|
+ } else {
|
|
|
+ if (unlikely(get_user(opcode, epc) < 0))
|
|
|
+ status = SIGSEGV;
|
|
|
+
|
|
|
+ if (!cpu_has_llsc && status < 0)
|
|
|
+ status = simulate_llsc(regs, opcode);
|
|
|
+
|
|
|
+ if (status < 0)
|
|
|
+ status = simulate_rdhwr_normal(regs, opcode);
|
|
|
+ }
|
|
|
|
|
|
if (status < 0)
|
|
|
status = SIGILL;
|
|
|
|
|
|
if (unlikely(status > 0)) {
|
|
|
regs->cp0_epc = old_epc; /* Undo skip-over. */
|
|
|
+ regs->regs[31] = old31;
|
|
|
force_sig(status, current);
|
|
|
}
|
|
|
|
|
@@ -1333,7 +1414,7 @@ asmlinkage void cache_parity_error(void)
|
|
|
void ejtag_exception_handler(struct pt_regs *regs)
|
|
|
{
|
|
|
const int field = 2 * sizeof(unsigned long);
|
|
|
- unsigned long depc, old_epc;
|
|
|
+ unsigned long depc, old_epc, old_ra;
|
|
|
unsigned int debug;
|
|
|
|
|
|
printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
|
|
@@ -1348,10 +1429,12 @@ void ejtag_exception_handler(struct pt_regs *regs)
|
|
|
* calculation.
|
|
|
*/
|
|
|
old_epc = regs->cp0_epc;
|
|
|
+ old_ra = regs->regs[31];
|
|
|
regs->cp0_epc = depc;
|
|
|
- __compute_return_epc(regs);
|
|
|
+ compute_return_epc(regs);
|
|
|
depc = regs->cp0_epc;
|
|
|
regs->cp0_epc = old_epc;
|
|
|
+ regs->regs[31] = old_ra;
|
|
|
} else
|
|
|
depc += 4;
|
|
|
write_c0_depc(depc);
|
|
@@ -1392,9 +1475,24 @@ void __init *set_except_vector(int n, void *addr)
|
|
|
unsigned long handler = (unsigned long) addr;
|
|
|
unsigned long old_handler = exception_handlers[n];
|
|
|
|
|
|
+#ifdef CONFIG_CPU_MICROMIPS
|
|
|
+ /*
|
|
|
+ * Only the TLB handlers are cache aligned with an even
|
|
|
+ * address. All other handlers are on an odd address and
|
|
|
+ * require no modification. Otherwise, MIPS32 mode will
|
|
|
+ * be entered when handling any TLB exceptions. That
|
|
|
+ * would be bad...since we must stay in microMIPS mode.
|
|
|
+ */
|
|
|
+ if (!(handler & 0x1))
|
|
|
+ handler |= 1;
|
|
|
+#endif
|
|
|
exception_handlers[n] = handler;
|
|
|
if (n == 0 && cpu_has_divec) {
|
|
|
+#ifdef CONFIG_CPU_MICROMIPS
|
|
|
+ unsigned long jump_mask = ~((1 << 27) - 1);
|
|
|
+#else
|
|
|
unsigned long jump_mask = ~((1 << 28) - 1);
|
|
|
+#endif
|
|
|
u32 *buf = (u32 *)(ebase + 0x200);
|
|
|
unsigned int k0 = 26;
|
|
|
if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
|
|
@@ -1421,17 +1519,18 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
|
|
|
unsigned long handler;
|
|
|
unsigned long old_handler = vi_handlers[n];
|
|
|
int srssets = current_cpu_data.srsets;
|
|
|
- u32 *w;
|
|
|
+ u16 *h;
|
|
|
unsigned char *b;
|
|
|
|
|
|
BUG_ON(!cpu_has_veic && !cpu_has_vint);
|
|
|
+ BUG_ON((n < 0) || (n > 9));
|
|
|
|
|
|
if (addr == NULL) {
|
|
|
handler = (unsigned long) do_default_vi;
|
|
|
srs = 0;
|
|
|
} else
|
|
|
handler = (unsigned long) addr;
|
|
|
- vi_handlers[n] = (unsigned long) addr;
|
|
|
+ vi_handlers[n] = handler;
|
|
|
|
|
|
b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);
|
|
|
|
|
@@ -1450,9 +1549,8 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
|
|
|
if (srs == 0) {
|
|
|
/*
|
|
|
* If no shadow set is selected then use the default handler
|
|
|
- * that does normal register saving and a standard interrupt exit
|
|
|
+ * that does normal register saving and standard interrupt exit
|
|
|
*/
|
|
|
-
|
|
|
extern char except_vec_vi, except_vec_vi_lui;
|
|
|
extern char except_vec_vi_ori, except_vec_vi_end;
|
|
|
extern char rollback_except_vec_vi;
|
|
@@ -1465,11 +1563,20 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
|
|
|
* Status.IM bit to be masked before going there.
|
|
|
*/
|
|
|
extern char except_vec_vi_mori;
|
|
|
+#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
|
|
|
+ const int mori_offset = &except_vec_vi_mori - vec_start + 2;
|
|
|
+#else
|
|
|
const int mori_offset = &except_vec_vi_mori - vec_start;
|
|
|
+#endif
|
|
|
#endif /* CONFIG_MIPS_MT_SMTC */
|
|
|
- const int handler_len = &except_vec_vi_end - vec_start;
|
|
|
+#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
|
|
|
+ const int lui_offset = &except_vec_vi_lui - vec_start + 2;
|
|
|
+ const int ori_offset = &except_vec_vi_ori - vec_start + 2;
|
|
|
+#else
|
|
|
const int lui_offset = &except_vec_vi_lui - vec_start;
|
|
|
const int ori_offset = &except_vec_vi_ori - vec_start;
|
|
|
+#endif
|
|
|
+ const int handler_len = &except_vec_vi_end - vec_start;
|
|
|
|
|
|
if (handler_len > VECTORSPACING) {
|
|
|
/*
|
|
@@ -1479,30 +1586,44 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
|
|
|
panic("VECTORSPACING too small");
|
|
|
}
|
|
|
|
|
|
- memcpy(b, vec_start, handler_len);
|
|
|
+ set_handler(((unsigned long)b - ebase), vec_start,
|
|
|
+#ifdef CONFIG_CPU_MICROMIPS
|
|
|
+ (handler_len - 1));
|
|
|
+#else
|
|
|
+ handler_len);
|
|
|
+#endif
|
|
|
#ifdef CONFIG_MIPS_MT_SMTC
|
|
|
BUG_ON(n > 7); /* Vector index %d exceeds SMTC maximum. */
|
|
|
|
|
|
- w = (u32 *)(b + mori_offset);
|
|
|
- *w = (*w & 0xffff0000) | (0x100 << n);
|
|
|
+ h = (u16 *)(b + mori_offset);
|
|
|
+ *h = (0x100 << n);
|
|
|
#endif /* CONFIG_MIPS_MT_SMTC */
|
|
|
- w = (u32 *)(b + lui_offset);
|
|
|
- *w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff);
|
|
|
- w = (u32 *)(b + ori_offset);
|
|
|
- *w = (*w & 0xffff0000) | ((u32)handler & 0xffff);
|
|
|
+ h = (u16 *)(b + lui_offset);
|
|
|
+ *h = (handler >> 16) & 0xffff;
|
|
|
+ h = (u16 *)(b + ori_offset);
|
|
|
+ *h = (handler & 0xffff);
|
|
|
local_flush_icache_range((unsigned long)b,
|
|
|
(unsigned long)(b+handler_len));
|
|
|
}
|
|
|
else {
|
|
|
/*
|
|
|
- * In other cases jump directly to the interrupt handler
|
|
|
- *
|
|
|
- * It is the handlers responsibility to save registers if required
|
|
|
- * (eg hi/lo) and return from the exception using "eret"
|
|
|
+ * In other cases jump directly to the interrupt handler. It
|
|
|
+ * is the handler's responsibility to save registers if required
|
|
|
+ * (eg hi/lo) and return from the exception using "eret".
|
|
|
*/
|
|
|
- w = (u32 *)b;
|
|
|
- *w++ = 0x08000000 | (((u32)handler >> 2) & 0x03fffff); /* j handler */
|
|
|
- *w = 0;
|
|
|
+ u32 insn;
|
|
|
+
|
|
|
+ h = (u16 *)b;
|
|
|
+ /* j handler */
|
|
|
+#ifdef CONFIG_CPU_MICROMIPS
|
|
|
+ insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1);
|
|
|
+#else
|
|
|
+ insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2);
|
|
|
+#endif
|
|
|
+ h[0] = (insn >> 16) & 0xffff;
|
|
|
+ h[1] = insn & 0xffff;
|
|
|
+ h[2] = 0;
|
|
|
+ h[3] = 0;
|
|
|
local_flush_icache_range((unsigned long)b,
|
|
|
(unsigned long)(b+8));
|
|
|
}
|
|
@@ -1663,7 +1784,11 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
|
|
|
/* Install CPU exception handler */
|
|
|
void __cpuinit set_handler(unsigned long offset, void *addr, unsigned long size)
|
|
|
{
|
|
|
+#ifdef CONFIG_CPU_MICROMIPS
|
|
|
+ memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
|
|
|
+#else
|
|
|
memcpy((void *)(ebase + offset), addr, size);
|
|
|
+#endif
|
|
|
local_flush_icache_range(ebase + offset, ebase + offset + size);
|
|
|
}
|
|
|
|
|
@@ -1697,8 +1822,9 @@ __setup("rdhwr_noopt", set_rdhwr_noopt);
|
|
|
|
|
|
void __init trap_init(void)
|
|
|
{
|
|
|
- extern char except_vec3_generic, except_vec3_r4000;
|
|
|
+ extern char except_vec3_generic;
|
|
|
extern char except_vec4;
|
|
|
+ extern char except_vec3_r4000;
|
|
|
unsigned long i;
|
|
|
int rollback;
|
|
|
|
|
@@ -1831,11 +1957,11 @@ void __init trap_init(void)
|
|
|
|
|
|
if (cpu_has_vce)
|
|
|
/* Special exception: R4[04]00 uses also the divec space. */
|
|
|
- memcpy((void *)(ebase + 0x180), &except_vec3_r4000, 0x100);
|
|
|
+ set_handler(0x180, &except_vec3_r4000, 0x100);
|
|
|
else if (cpu_has_4kex)
|
|
|
- memcpy((void *)(ebase + 0x180), &except_vec3_generic, 0x80);
|
|
|
+ set_handler(0x180, &except_vec3_generic, 0x80);
|
|
|
else
|
|
|
- memcpy((void *)(ebase + 0x080), &except_vec3_generic, 0x80);
|
|
|
+ set_handler(0x080, &except_vec3_generic, 0x80);
|
|
|
|
|
|
local_flush_icache_range(ebase, ebase + 0x400);
|
|
|
flush_tlb_handlers();
|