traps.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle
 * Copyright (C) 1995, 1996 Paul M. Antoine
 * Copyright (C) 1998 Ulf Carlsson
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000, 01 MIPS Technologies, Inc.
 * Copyright (C) 2002, 2003, 2004, 2005 Maciej W. Rozycki
 */
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/bootmem.h>
#include <linux/interrupt.h>

#include <asm/bootinfo.h>
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/cpu.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/module.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/tlbdebug.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/watch.h>
#include <asm/types.h>
#include <asm/stacktrace.h>
extern asmlinkage void handle_int(void);
extern asmlinkage void handle_tlbm(void);
extern asmlinkage void handle_tlbl(void);
extern asmlinkage void handle_tlbs(void);
extern asmlinkage void handle_adel(void);
extern asmlinkage void handle_ades(void);
extern asmlinkage void handle_ibe(void);
extern asmlinkage void handle_dbe(void);
extern asmlinkage void handle_sys(void);
extern asmlinkage void handle_bp(void);
extern asmlinkage void handle_ri(void);
extern asmlinkage void handle_ri_rdhwr_vivt(void);
extern asmlinkage void handle_ri_rdhwr(void);
extern asmlinkage void handle_cpu(void);
extern asmlinkage void handle_ov(void);
extern asmlinkage void handle_tr(void);
extern asmlinkage void handle_fpe(void);
extern asmlinkage void handle_mdmx(void);
extern asmlinkage void handle_watch(void);
extern asmlinkage void handle_mt(void);
extern asmlinkage void handle_dsp(void);
extern asmlinkage void handle_mcheck(void);
extern asmlinkage void handle_reserved(void);

extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
	struct mips_fpu_struct *ctx, int has_fpu);

void (*board_be_init)(void);
int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
void (*board_nmi_handler_setup)(void);
void (*board_ejtag_handler_setup)(void);
void (*board_bind_eic_interrupt)(int irq, int regset);
static void show_raw_backtrace(unsigned long reg29)
{
	unsigned long *sp = (unsigned long *)reg29;
	unsigned long addr;

	printk("Call Trace:");
#ifdef CONFIG_KALLSYMS
	printk("\n");
#endif
	while (!kstack_end(sp)) {
		addr = *sp++;
		if (__kernel_text_address(addr))
			print_ip_sym(addr);
	}
	printk("\n");
}

#ifdef CONFIG_KALLSYMS
int raw_show_trace;
static int __init set_raw_show_trace(char *str)
{
	raw_show_trace = 1;
	return 1;
}
__setup("raw_show_trace", set_raw_show_trace);
#endif

static void show_backtrace(struct task_struct *task, struct pt_regs *regs)
{
	unsigned long sp = regs->regs[29];
	unsigned long ra = regs->regs[31];
	unsigned long pc = regs->cp0_epc;

	if (raw_show_trace || !__kernel_text_address(pc)) {
		show_raw_backtrace(sp);
		return;
	}
	printk("Call Trace:\n");
	do {
		print_ip_sym(pc);
		pc = unwind_stack(task, &sp, pc, &ra);
	} while (pc);
	printk("\n");
}
/*
 * This routine abuses get_user()/put_user() to reference pointers
 * with at least a bit of error checking ...
 */
static void show_stacktrace(struct task_struct *task, struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	long stackdata;
	int i;
	unsigned long *sp = (unsigned long *)regs->regs[29];

	printk("Stack :");
	i = 0;
	while ((unsigned long) sp & (PAGE_SIZE - 1)) {
		if (i && ((i % (64 / field)) == 0))
			printk("\n ");
		if (i > 39) {
			printk(" ...");
			break;
		}

		if (__get_user(stackdata, sp++)) {
			printk(" (Bad stack address)");
			break;
		}

		printk(" %0*lx", field, stackdata);
		i++;
	}
	printk("\n");
	show_backtrace(task, regs);
}

void show_stack(struct task_struct *task, unsigned long *sp)
{
	struct pt_regs regs;

	if (sp) {
		regs.regs[29] = (unsigned long)sp;
		regs.regs[31] = 0;
		regs.cp0_epc = 0;
	} else {
		if (task && task != current) {
			regs.regs[29] = task->thread.reg29;
			regs.regs[31] = 0;
			regs.cp0_epc = task->thread.reg31;
		} else {
			prepare_frametrace(&regs);
		}
	}
	show_stacktrace(task, &regs);
}
/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
	struct pt_regs regs;

	prepare_frametrace(&regs);
	show_backtrace(current, &regs);
}

EXPORT_SYMBOL(dump_stack);

void show_code(unsigned int *pc)
{
	long i;

	printk("\nCode:");

	for (i = -3; i < 6; i++) {
		unsigned int insn;
		if (__get_user(insn, pc + i)) {
			printk(" (Bad address in epc)\n");
			break;
		}
		printk("%c%08x%c", (i?' ':'<'), insn, (i?' ':'>'));
	}
}
void show_regs(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int cause = regs->cp0_cause;
	int i;

	printk("Cpu %d\n", smp_processor_id());

	/*
	 * Saved main processor registers
	 */
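	/*
	 * $0 is hardwired to zero, and $26/$27 (k0/k1) are clobbered by
	 * the low-level exception handlers, so they are printed as zero
	 * and blank respectively below.
	 */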
	for (i = 0; i < 32; ) {
		if ((i % 4) == 0)
			printk("$%2d :", i);
		if (i == 0)
			printk(" %0*lx", field, 0UL);
		else if (i == 26 || i == 27)
			printk(" %*s", field, "");
		else
			printk(" %0*lx", field, regs->regs[i]);

		i++;
		if ((i % 4) == 0)
			printk("\n");
	}

#ifdef CONFIG_CPU_HAS_SMARTMIPS
	printk("Acx : %0*lx\n", field, regs->acx);
#endif
	printk("Hi : %0*lx\n", field, regs->hi);
	printk("Lo : %0*lx\n", field, regs->lo);

	/*
	 * Saved cp0 registers
	 */
	printk("epc : %0*lx ", field, regs->cp0_epc);
	print_symbol("%s ", regs->cp0_epc);
	printk(" %s\n", print_tainted());
	printk("ra : %0*lx ", field, regs->regs[31]);
	print_symbol("%s\n", regs->regs[31]);

	printk("Status: %08x ", (uint32_t) regs->cp0_status);

	if (current_cpu_data.isa_level == MIPS_CPU_ISA_I) {
		if (regs->cp0_status & ST0_KUO)
			printk("KUo ");
		if (regs->cp0_status & ST0_IEO)
			printk("IEo ");
		if (regs->cp0_status & ST0_KUP)
			printk("KUp ");
		if (regs->cp0_status & ST0_IEP)
			printk("IEp ");
		if (regs->cp0_status & ST0_KUC)
			printk("KUc ");
		if (regs->cp0_status & ST0_IEC)
			printk("IEc ");
	} else {
		if (regs->cp0_status & ST0_KX)
			printk("KX ");
		if (regs->cp0_status & ST0_SX)
			printk("SX ");
		if (regs->cp0_status & ST0_UX)
			printk("UX ");
		switch (regs->cp0_status & ST0_KSU) {
		case KSU_USER:
			printk("USER ");
			break;
		case KSU_SUPERVISOR:
			printk("SUPERVISOR ");
			break;
		case KSU_KERNEL:
			printk("KERNEL ");
			break;
		default:
			printk("BAD_MODE ");
			break;
		}
		if (regs->cp0_status & ST0_ERL)
			printk("ERL ");
		if (regs->cp0_status & ST0_EXL)
			printk("EXL ");
		if (regs->cp0_status & ST0_IE)
			printk("IE ");
	}
	printk("\n");

	printk("Cause : %08x\n", cause);

	cause = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
	if (1 <= cause && cause <= 5)
		printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);

	printk("PrId : %08x\n", read_c0_prid());
}
void show_registers(struct pt_regs *regs)
{
	show_regs(regs);
	print_modules();
	printk("Process %s (pid: %d, threadinfo=%p, task=%p)\n",
		current->comm, current->pid, current_thread_info(), current);
	show_stacktrace(current, regs);
	show_code((unsigned int *) regs->cp0_epc);
	printk("\n");
}

static DEFINE_SPINLOCK(die_lock);

NORET_TYPE void ATTRIB_NORET die(const char * str, struct pt_regs * regs)
{
	static int die_counter;
#ifdef CONFIG_MIPS_MT_SMTC
	unsigned long dvpret = dvpe();
#endif /* CONFIG_MIPS_MT_SMTC */

	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);
#ifdef CONFIG_MIPS_MT_SMTC
	mips_mt_regdump(dvpret);
#endif /* CONFIG_MIPS_MT_SMTC */
	printk("%s[#%d]:\n", str, ++die_counter);
	show_registers(regs);
	spin_unlock_irq(&die_lock);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops) {
		printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
		ssleep(5);
		panic("Fatal exception");
	}

	do_exit(SIGSEGV);
}
extern const struct exception_table_entry __start___dbe_table[];
extern const struct exception_table_entry __stop___dbe_table[];

__asm__(
"	.section __dbe_table, \"a\"\n"
"	.previous \n");

/* Given an address, look for it in the exception tables. */
static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
{
	const struct exception_table_entry *e;

	e = search_extable(__start___dbe_table, __stop___dbe_table - 1, addr);
	if (!e)
		e = search_module_dbetables(addr);
	return e;
}

asmlinkage void do_be(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	const struct exception_table_entry *fixup = NULL;
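	/* Bit 2 of Cause.ExcCode distinguishes a data bus error (DBE, code 7) from an instruction bus error (IBE, code 6). */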
	int data = regs->cp0_cause & 4;
	int action = MIPS_BE_FATAL;

	/* XXX For now. Fixme, this searches the wrong table ... */
	if (data && !user_mode(regs))
		fixup = search_dbe_tables(exception_epc(regs));

	if (fixup)
		action = MIPS_BE_FIXUP;

	if (board_be_handler)
		action = board_be_handler(regs, fixup != 0);

	switch (action) {
	case MIPS_BE_DISCARD:
		return;
	case MIPS_BE_FIXUP:
		if (fixup) {
			regs->cp0_epc = fixup->nextinsn;
			return;
		}
		break;
	default:
		break;
	}

	/*
	 * Assume it would be too dangerous to continue ...
	 */
	printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
	       data ? "Data" : "Instruction",
	       field, regs->cp0_epc, field, regs->regs[31]);
	die_if_kernel("Oops", regs);
	force_sig(SIGBUS, current);
}
/*
 * ll/sc emulation
 */

#define OPCODE 0xfc000000
#define BASE   0x03e00000
#define RT     0x001f0000
#define OFFSET 0x0000ffff
#define LL     0xc0000000
#define SC     0xe0000000
#define SPEC3  0x7c000000
#define RD     0x0000f800
#define FUNC   0x0000003f
#define RDHWR  0x0000003b
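
/*
 * The masks above pick out the standard MIPS instruction fields:
 * bits 31:26 (opcode), 25:21 (base), 20:16 (rt) and 15:0 (offset)
 * for the I-type ll/sc encodings, and bits 15:11 (rd) and 5:0
 * (function) for the SPEC3 rdhwr encoding emulated below.
 */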

/*
 * The ll_bit is cleared by r*_switch.S
 */

unsigned long ll_bit;

static struct task_struct *ll_task = NULL;

static inline void simulate_ll(struct pt_regs *regs, unsigned int opcode)
{
	unsigned long value, __user *vaddr;
	long offset;
	int signal = 0;

	/*
	 * analyse the ll instruction that just caused a ri exception
	 * and put the referenced address into vaddr.
	 */

	/* sign extend offset */
	offset = opcode & OFFSET;
	offset <<= 16;
	offset >>= 16;

	vaddr = (unsigned long __user *)
		((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);

	if ((unsigned long)vaddr & 3) {
		signal = SIGBUS;
		goto sig;
	}
	if (get_user(value, vaddr)) {
		signal = SIGSEGV;
		goto sig;
	}

	preempt_disable();

	if (ll_task == NULL || ll_task == current) {
		ll_bit = 1;
	} else {
		ll_bit = 0;
	}
	ll_task = current;

	preempt_enable();

	compute_return_epc(regs);

	regs->regs[(opcode & RT) >> 16] = value;

	return;

sig:
	force_sig(signal, current);
}
static inline void simulate_sc(struct pt_regs *regs, unsigned int opcode)
{
	unsigned long __user *vaddr;
	unsigned long reg;
	long offset;
	int signal = 0;

	/*
	 * analyse the sc instruction that just caused a ri exception
	 * and put the referenced address into vaddr.
	 */

	/* sign extend offset */
	offset = opcode & OFFSET;
	offset <<= 16;
	offset >>= 16;

	vaddr = (unsigned long __user *)
		((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
	reg = (opcode & RT) >> 16;

	if ((unsigned long)vaddr & 3) {
		signal = SIGBUS;
		goto sig;
	}

	preempt_disable();

	if (ll_bit == 0 || ll_task != current) {
		compute_return_epc(regs);
		regs->regs[reg] = 0;
		preempt_enable();
		return;
	}

	preempt_enable();
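	/* put_user() may fault and sleep, so preemption is re-enabled above before touching user memory. */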
	if (put_user(regs->regs[reg], vaddr)) {
		signal = SIGSEGV;
		goto sig;
	}

	compute_return_epc(regs);
	regs->regs[reg] = 1;

	return;

sig:
	force_sig(signal, current);
}
/*
 * ll uses the opcode of lwc0 and sc uses the opcode of swc0. That is, both
 * opcodes are supposed to result in coprocessor unusable exceptions if
 * executed on ll/sc-less processors. That's the theory. In practice a
 * few processors such as NEC's VR4100 throw reserved instruction exceptions
 * instead, so we're doing the emulation thing in both exception handlers.
 */
static inline int simulate_llsc(struct pt_regs *regs)
{
	unsigned int opcode;

	if (get_user(opcode, (unsigned int __user *) exception_epc(regs)))
		goto out_sigsegv;

	if ((opcode & OPCODE) == LL) {
		simulate_ll(regs, opcode);
		return 0;
	}
	if ((opcode & OPCODE) == SC) {
		simulate_sc(regs, opcode);
		return 0;
	}

	return -EFAULT;		/* Strange things going on ... */

out_sigsegv:
	force_sig(SIGSEGV, current);
	return -EFAULT;
}
/*
 * Simulate trapping 'rdhwr' instructions to provide user accessible
 * registers not implemented in hardware. The only current use of this
 * is the thread area pointer.
 */
static inline int simulate_rdhwr(struct pt_regs *regs)
{
	struct thread_info *ti = task_thread_info(current);
	unsigned int opcode;

	if (get_user(opcode, (unsigned int __user *) exception_epc(regs)))
		goto out_sigsegv;

	if (unlikely(compute_return_epc(regs)))
		return -EFAULT;

	if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
		int rd = (opcode & RD) >> 11;
		int rt = (opcode & RT) >> 16;
		switch (rd) {
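		/* Hardware register 29 is the UserLocal / thread-pointer register. */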
		case 29:
			regs->regs[rt] = ti->tp_value;
			return 0;
		default:
			return -EFAULT;
		}
	}

	/* Not ours. */
	return -EFAULT;

out_sigsegv:
	force_sig(SIGSEGV, current);
	return -EFAULT;
}
asmlinkage void do_ov(struct pt_regs *regs)
{
	siginfo_t info;

	die_if_kernel("Integer overflow", regs);

	info.si_code = FPE_INTOVF;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_addr = (void __user *) regs->cp0_epc;
	force_sig_info(SIGFPE, &info, current);
}
/*
 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
 */
asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
{
	die_if_kernel("FP exception in kernel code", regs);

	if (fcr31 & FPU_CSR_UNI_X) {
		int sig;

		/*
		 * Unimplemented operation exception. If we've got the full
		 * software emulator on-board, let's use it...
		 *
		 * Force FPU to dump state into task/thread context. We're
		 * moving a lot of data here for what is probably a single
		 * instruction, but the alternative is to pre-decode the FP
		 * register operands before invoking the emulator, which seems
		 * a bit extreme for what should be an infrequent event.
		 */
		/* Ensure 'resume' does not overwrite the saved fp context again. */
		lose_fpu(1);

		/* Run the emulator */
		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1);

		/*
		 * We can't allow the emulated instruction to leave any of
		 * the cause bits set in $fcr31.
		 */
		current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;

		/* Restore the hardware register state */
		own_fpu(1);	/* Using the FPU again. */

		/* If something went wrong, signal */
		if (sig)
			force_sig(sig, current);

		return;
	}

	force_sig(SIGFPE, current);
}
asmlinkage void do_bp(struct pt_regs *regs)
{
	unsigned int opcode, bcode;
	siginfo_t info;

	if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
		goto out_sigsegv;

	/*
	 * There is an ancient bug in the MIPS assemblers that makes the
	 * break code start at bit 16 instead of bit 6 in the opcode.
	 * Gas is bug-compatible, but not always, grrr...
	 * We handle both cases with a simple heuristic. --macro
	 */
	bcode = ((opcode >> 6) & ((1 << 20) - 1));
	if (bcode < (1 << 10))
		bcode <<= 10;

	/*
	 * (A short test says that IRIX 5.3 sends SIGTRAP for all break
	 * insns, even for break codes that indicate arithmetic failures.
	 * Weird ...)
	 * But should we continue the brokenness??? --macro
	 */
	switch (bcode) {
	case BRK_OVERFLOW << 10:
	case BRK_DIVZERO << 10:
		die_if_kernel("Break instruction in kernel code", regs);
		if (bcode == (BRK_DIVZERO << 10))
			info.si_code = FPE_INTDIV;
		else
			info.si_code = FPE_INTOVF;
		info.si_signo = SIGFPE;
		info.si_errno = 0;
		info.si_addr = (void __user *) regs->cp0_epc;
		force_sig_info(SIGFPE, &info, current);
		break;
	case BRK_BUG:
		die("Kernel bug detected", regs);
		break;
	default:
		die_if_kernel("Break instruction in kernel code", regs);
		force_sig(SIGTRAP, current);
	}
	return;

out_sigsegv:
	force_sig(SIGSEGV, current);
}
asmlinkage void do_tr(struct pt_regs *regs)
{
	unsigned int opcode, tcode = 0;
	siginfo_t info;

	if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
		goto out_sigsegv;

	/* Immediate versions don't provide a code. */
	if (!(opcode & OPCODE))
		tcode = ((opcode >> 6) & ((1 << 10) - 1));

	/*
	 * (A short test says that IRIX 5.3 sends SIGTRAP for all trap
	 * insns, even for trap codes that indicate arithmetic failures.
	 * Weird ...)
	 * But should we continue the brokenness??? --macro
	 */
	switch (tcode) {
	case BRK_OVERFLOW:
	case BRK_DIVZERO:
		die_if_kernel("Trap instruction in kernel code", regs);
		if (tcode == BRK_DIVZERO)
			info.si_code = FPE_INTDIV;
		else
			info.si_code = FPE_INTOVF;
		info.si_signo = SIGFPE;
		info.si_errno = 0;
		info.si_addr = (void __user *) regs->cp0_epc;
		force_sig_info(SIGFPE, &info, current);
		break;
	case BRK_BUG:
		die("Kernel bug detected", regs);
		break;
	default:
		die_if_kernel("Trap instruction in kernel code", regs);
		force_sig(SIGTRAP, current);
	}
	return;

out_sigsegv:
	force_sig(SIGSEGV, current);
}
asmlinkage void do_ri(struct pt_regs *regs)
{
	die_if_kernel("Reserved instruction in kernel code", regs);

	if (!cpu_has_llsc)
		if (!simulate_llsc(regs))
			return;

	if (!simulate_rdhwr(regs))
		return;

	force_sig(SIGILL, current);
}
asmlinkage void do_cpu(struct pt_regs *regs)
{
	unsigned int cpid;

	die_if_kernel("do_cpu invoked from kernel context!", regs);
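	/* Cause.CE identifies which coprocessor (0-3) raised the unusable exception. */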
	cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;

	switch (cpid) {
	case 0:
		if (!cpu_has_llsc)
			if (!simulate_llsc(regs))
				return;

		if (!simulate_rdhwr(regs))
			return;

		break;

	case 1:
		if (used_math())	/* Using the FPU again. */
			own_fpu(1);
		else {			/* First time FPU user. */
			init_fpu();
			set_used_math();
		}

		if (!raw_cpu_has_fpu) {
			int sig;
			sig = fpu_emulator_cop1Handler(regs,
						&current->thread.fpu, 0);
			if (sig)
				force_sig(sig, current);
#ifdef CONFIG_MIPS_MT_FPAFF
			else {
				/*
				 * MIPS MT processors may have fewer FPU contexts
				 * than CPU threads. If we've emulated more than
				 * some threshold number of instructions, force
				 * migration to a "CPU" that has FP support.
				 */
				if (mt_fpemul_threshold > 0 &&
				    ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
					/*
					 * If there's no FPU present, or if the
					 * application has already restricted
					 * the allowed set to exclude any CPUs
					 * with FPUs, we'll skip the procedure.
					 */
					if (cpus_intersects(current->cpus_allowed,
							    mt_fpu_cpumask)) {
						cpumask_t tmask;

						cpus_and(tmask,
							current->thread.user_cpus_allowed,
							mt_fpu_cpumask);
						set_cpus_allowed(current, tmask);
						current->thread.mflags |= MF_FPUBOUND;
					}
				}
			}
#endif /* CONFIG_MIPS_MT_FPAFF */
		}

		return;

	case 2:
	case 3:
		break;
	}

	force_sig(SIGILL, current);
}
asmlinkage void do_mdmx(struct pt_regs *regs)
{
	force_sig(SIGILL, current);
}

asmlinkage void do_watch(struct pt_regs *regs)
{
	/*
	 * We use the watch exception where available to detect stack
	 * overflows.
	 */
	dump_tlb_all();
	show_regs(regs);
	panic("Caught WATCH exception - probably caused by stack overflow.");
}
asmlinkage void do_mcheck(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	int multi_match = regs->cp0_status & ST0_TS;

	show_regs(regs);

	if (multi_match) {
		printk("Index : %0x\n", read_c0_index());
		printk("Pagemask: %0x\n", read_c0_pagemask());
		printk("EntryHi : %0*lx\n", field, read_c0_entryhi());
		printk("EntryLo0: %0*lx\n", field, read_c0_entrylo0());
		printk("EntryLo1: %0*lx\n", field, read_c0_entrylo1());
		printk("\n");
		dump_tlb_all();
	}

	show_code((unsigned int *) regs->cp0_epc);

	/*
	 * Some chips may have other causes of machine check (e.g. SB1
	 * graduation timer)
	 */
	panic("Caught Machine Check exception - %scaused by multiple "
	      "matching entries in the TLB.",
	      (multi_match) ? "" : "not ");
}
asmlinkage void do_mt(struct pt_regs *regs)
{
	int subcode;

	subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
			>> VPECONTROL_EXCPT_SHIFT;
	switch (subcode) {
	case 0:
		printk(KERN_DEBUG "Thread Underflow\n");
		break;
	case 1:
		printk(KERN_DEBUG "Thread Overflow\n");
		break;
	case 2:
		printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
		break;
	case 3:
		printk(KERN_DEBUG "Gating Storage Exception\n");
		break;
	case 4:
		printk(KERN_DEBUG "YIELD Scheduler Exception\n");
		break;
	case 5:
		printk(KERN_DEBUG "Gating Storage Scheduler Exception\n");
		break;
	default:
		printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
			subcode);
		break;
	}
	die_if_kernel("MIPS MT Thread exception in kernel", regs);

	force_sig(SIGILL, current);
}
asmlinkage void do_dsp(struct pt_regs *regs)
{
	if (cpu_has_dsp)
		panic("Unexpected DSP exception\n");

	force_sig(SIGILL, current);
}

asmlinkage void do_reserved(struct pt_regs *regs)
{
	/*
	 * Game over - no way to handle this if it ever occurs. Most probably
	 * caused by a new unknown cpu type or after another deadly
	 * hard/software error.
	 */
	show_regs(regs);
	panic("Caught reserved exception %ld - should not happen.",
	      (regs->cp0_cause & 0x7f) >> 2);
}
/*
 * Some MIPS CPUs can enable/disable cache parity detection, but they
 * do it in different ways.
 */
static inline void parity_protection_init(void)
{
	switch (current_cpu_data.cputype) {
	case CPU_24K:
	case CPU_34K:
	case CPU_5KC:
		write_c0_ecc(0x80000000);
		back_to_back_c0_hazard();
		/* Set the PE bit (bit 31) in the c0_errctl register. */
		printk(KERN_INFO "Cache parity protection %sabled\n",
		       (read_c0_ecc() & 0x80000000) ? "en" : "dis");
		break;
	case CPU_20KC:
	case CPU_25KF:
		/* Clear the DE bit (bit 16) in the c0_status register. */
		printk(KERN_INFO "Enable cache parity protection for "
		       "MIPS 20KC/25KF CPUs.\n");
		clear_c0_status(ST0_DE);
		break;
	default:
		break;
	}
}
asmlinkage void cache_parity_error(void)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int reg_val;

	/* For the moment, report the problem and hang. */
	printk("Cache error exception:\n");
	printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
	reg_val = read_c0_cacheerr();
	printk("c0_cacheerr == %08x\n", reg_val);

	printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
	       reg_val & (1<<30) ? "secondary" : "primary",
	       reg_val & (1<<31) ? "data" : "insn");
	printk("Error bits: %s%s%s%s%s%s%s\n",
	       reg_val & (1<<29) ? "ED " : "",
	       reg_val & (1<<28) ? "ET " : "",
	       reg_val & (1<<26) ? "EE " : "",
	       reg_val & (1<<25) ? "EB " : "",
	       reg_val & (1<<24) ? "EI " : "",
	       reg_val & (1<<23) ? "E1 " : "",
	       reg_val & (1<<22) ? "E0 " : "");
	printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));

#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
	if (reg_val & (1<<22))
		printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());

	if (reg_val & (1<<23))
		printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
#endif

	panic("Can't handle the cache error!");
}
/*
 * SDBBP EJTAG debug exception handler.
 * We skip the instruction and return to the next instruction.
 */
void ejtag_exception_handler(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned long depc, old_epc;
	unsigned int debug;

	printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
	depc = read_c0_depc();
	debug = read_c0_debug();
	printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
	if (debug & 0x80000000) {
		/*
		 * In branch delay slot.
		 * We cheat a little bit here and use EPC to calculate the
		 * debug return address (DEPC). EPC is restored after the
		 * calculation.
		 */
		old_epc = regs->cp0_epc;
		regs->cp0_epc = depc;
		__compute_return_epc(regs);
		depc = regs->cp0_epc;
		regs->cp0_epc = old_epc;
	} else
		depc += 4;
	write_c0_depc(depc);

#if 0
	printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n");
	write_c0_debug(debug | 0x100);
#endif
}
/*
 * NMI exception handler.
 */
void nmi_exception_handler(struct pt_regs *regs)
{
#ifdef CONFIG_MIPS_MT_SMTC
	unsigned long dvpret = dvpe();
	bust_spinlocks(1);
	printk("NMI taken!!!!\n");
	mips_mt_regdump(dvpret);
#else
	bust_spinlocks(1);
	printk("NMI taken!!!!\n");
#endif /* CONFIG_MIPS_MT_SMTC */
	die("NMI", regs);
	while (1);
}
#define VECTORSPACING 0x100	/* for EI/VI mode */

unsigned long ebase;
unsigned long exception_handlers[32];
unsigned long vi_handlers[64];

/*
 * As a side effect of the way this is implemented we're limited
 * to interrupt handlers in the address range from
 * KSEG0 <= x < KSEG0 + 256mb on the Nevada. Oh well ...
 */
void *set_except_vector(int n, void *addr)
{
	unsigned long handler = (unsigned long) addr;
	unsigned long old_handler = exception_handlers[n];

	exception_handlers[n] = handler;
	if (n == 0 && cpu_has_divec) {
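		/* Patch a "j handler" jump instruction into the dedicated interrupt vector at ebase + 0x200. */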
		*(volatile u32 *)(ebase + 0x200) = 0x08000000 |
						   (0x03ffffff & (handler >> 2));
		flush_icache_range(ebase + 0x200, ebase + 0x204);
	}
	return (void *)old_handler;
}

#ifdef CONFIG_CPU_MIPSR2_SRS
/*
 * MIPSR2 shadow register set allocation
 * FIXME: SMP...
 */

static struct shadow_registers {
	/*
	 * Number of shadow register sets supported
	 */
	unsigned long sr_supported;
	/*
	 * Bitmap of allocated shadow registers
	 */
	unsigned long sr_allocated;
} shadow_registers;

static void mips_srs_init(void)
{
	shadow_registers.sr_supported = ((read_c0_srsctl() >> 26) & 0x0f) + 1;
	printk(KERN_INFO "%ld MIPSR2 register sets available\n",
	       shadow_registers.sr_supported);
	shadow_registers.sr_allocated = 1;	/* Set 0 used by kernel */
}

int mips_srs_max(void)
{
	return shadow_registers.sr_supported;
}

int mips_srs_alloc(void)
{
	struct shadow_registers *sr = &shadow_registers;
	int set;

again:
	set = find_first_zero_bit(&sr->sr_allocated, sr->sr_supported);
	if (set >= sr->sr_supported)
		return -1;
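	/* Another CPU may have claimed this set since the scan; if the atomic test-and-set finds the bit already taken, rescan. */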
	if (test_and_set_bit(set, &sr->sr_allocated))
		goto again;

	return set;
}

void mips_srs_free(int set)
{
	struct shadow_registers *sr = &shadow_registers;

	clear_bit(set, &sr->sr_allocated);
}

static asmlinkage void do_default_vi(void)
{
	show_regs(get_irq_regs());
	panic("Caught unexpected vectored interrupt.");
}
static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
{
	unsigned long handler;
	unsigned long old_handler = vi_handlers[n];
	u32 *w;
	unsigned char *b;

	if (!cpu_has_veic && !cpu_has_vint)
		BUG();

	if (addr == NULL) {
		handler = (unsigned long) do_default_vi;
		srs = 0;
	} else
		handler = (unsigned long) addr;
	vi_handlers[n] = (unsigned long) addr;

	b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);

	if (srs >= mips_srs_max())
		panic("Shadow register set %d not supported", srs);

	if (cpu_has_veic) {
		if (board_bind_eic_interrupt)
			board_bind_eic_interrupt(n, srs);
	} else if (cpu_has_vint) {
		/* SRSMap is only defined if shadow sets are implemented */
		if (mips_srs_max() > 1)
			change_c0_srsmap(0xf << n*4, srs << n*4);
	}

	if (srs == 0) {
		/*
		 * If no shadow set is selected then use the default handler
		 * that does normal register saving and a standard interrupt exit
		 */

		extern char except_vec_vi, except_vec_vi_lui;
		extern char except_vec_vi_ori, except_vec_vi_end;
#ifdef CONFIG_MIPS_MT_SMTC
		/*
		 * We need to provide the SMTC vectored interrupt handler
		 * not only with the address of the handler, but with the
		 * Status.IM bit to be masked before going there.
		 */
		extern char except_vec_vi_mori;
		const int mori_offset = &except_vec_vi_mori - &except_vec_vi;
#endif /* CONFIG_MIPS_MT_SMTC */
		const int handler_len = &except_vec_vi_end - &except_vec_vi;
		const int lui_offset = &except_vec_vi_lui - &except_vec_vi;
		const int ori_offset = &except_vec_vi_ori - &except_vec_vi;

		if (handler_len > VECTORSPACING) {
			/*
			 * Sigh... panicking won't help as the console
			 * is probably not configured :(
			 */
			panic("VECTORSPACING too small");
		}

		memcpy(b, &except_vec_vi, handler_len);
#ifdef CONFIG_MIPS_MT_SMTC
		BUG_ON(n > 7);	/* Vector index %d exceeds SMTC maximum. */

		w = (u32 *)(b + mori_offset);
		*w = (*w & 0xffff0000) | (0x100 << n);
#endif /* CONFIG_MIPS_MT_SMTC */
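		/* Patch the handler's address into the lui/ori pair of the copied vector stub. */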
		w = (u32 *)(b + lui_offset);
		*w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff);
		w = (u32 *)(b + ori_offset);
		*w = (*w & 0xffff0000) | ((u32)handler & 0xffff);
		flush_icache_range((unsigned long)b, (unsigned long)(b+handler_len));
	}
	else {
		/*
		 * In other cases jump directly to the interrupt handler
		 *
		 * It is the handler's responsibility to save registers if required
		 * (e.g. hi/lo) and return from the exception using "eret"
		 */
		w = (u32 *)b;
		*w++ = 0x08000000 | (((u32)handler >> 2) & 0x03fffff);	/* j handler */
		*w = 0;
		flush_icache_range((unsigned long)b, (unsigned long)(b+8));
	}

	return (void *)old_handler;
}

void *set_vi_handler(int n, vi_handler_t addr)
{
	return set_vi_srs_handler(n, addr, 0);
}

#else

static inline void mips_srs_init(void)
{
}

#endif /* CONFIG_CPU_MIPSR2_SRS */
/*
 * This is used by native signal handling
 */
asmlinkage int (*save_fp_context)(struct sigcontext __user *sc);
asmlinkage int (*restore_fp_context)(struct sigcontext __user *sc);

extern asmlinkage int _save_fp_context(struct sigcontext __user *sc);
extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc);

extern asmlinkage int fpu_emulator_save_context(struct sigcontext __user *sc);
extern asmlinkage int fpu_emulator_restore_context(struct sigcontext __user *sc);

#ifdef CONFIG_SMP
static int smp_save_fp_context(struct sigcontext __user *sc)
{
	return raw_cpu_has_fpu
	       ? _save_fp_context(sc)
	       : fpu_emulator_save_context(sc);
}

static int smp_restore_fp_context(struct sigcontext __user *sc)
{
	return raw_cpu_has_fpu
	       ? _restore_fp_context(sc)
	       : fpu_emulator_restore_context(sc);
}
#endif

static inline void signal_init(void)
{
#ifdef CONFIG_SMP
	/* For now just do the cpu_has_fpu check when the functions are invoked */
	save_fp_context = smp_save_fp_context;
	restore_fp_context = smp_restore_fp_context;
#else
	if (cpu_has_fpu) {
		save_fp_context = _save_fp_context;
		restore_fp_context = _restore_fp_context;
	} else {
		save_fp_context = fpu_emulator_save_context;
		restore_fp_context = fpu_emulator_restore_context;
	}
#endif
}
#ifdef CONFIG_MIPS32_COMPAT

/*
 * This is used by 32-bit signal stuff on the 64-bit kernel
 */
asmlinkage int (*save_fp_context32)(struct sigcontext32 __user *sc);
asmlinkage int (*restore_fp_context32)(struct sigcontext32 __user *sc);

extern asmlinkage int _save_fp_context32(struct sigcontext32 __user *sc);
extern asmlinkage int _restore_fp_context32(struct sigcontext32 __user *sc);

extern asmlinkage int fpu_emulator_save_context32(struct sigcontext32 __user *sc);
extern asmlinkage int fpu_emulator_restore_context32(struct sigcontext32 __user *sc);

static inline void signal32_init(void)
{
	if (cpu_has_fpu) {
		save_fp_context32 = _save_fp_context32;
		restore_fp_context32 = _restore_fp_context32;
	} else {
		save_fp_context32 = fpu_emulator_save_context32;
		restore_fp_context32 = fpu_emulator_restore_context32;
	}
}
#endif

extern void cpu_cache_init(void);
extern void tlb_init(void);
extern void flush_tlb_handlers(void);
void __init per_cpu_trap_init(void)
{
	unsigned int cpu = smp_processor_id();
	unsigned int status_set = ST0_CU0;
#ifdef CONFIG_MIPS_MT_SMTC
	int secondaryTC = 0;
	int bootTC = (cpu == 0);

	/*
	 * Only do per_cpu_trap_init() for the first TC of each VPE.
	 * Note that this hack assumes that the SMTC init code
	 * assigns TCs consecutively and in ascending order.
	 */

	if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
	    ((read_c0_tcbind() & TCBIND_CURVPE) == cpu_data[cpu - 1].vpe_id))
		secondaryTC = 1;
#endif /* CONFIG_MIPS_MT_SMTC */

	/*
	 * Disable coprocessors and select 32-bit or 64-bit addressing
	 * and the 16/32 or 32/32 FPR register model. Reset the BEV
	 * flag that some firmware may have left set and the TS bit (for
	 * IP27). Set XX for ISA IV code to work.
	 */
#ifdef CONFIG_64BIT
	status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
#endif
	if (current_cpu_data.isa_level == MIPS_CPU_ISA_IV)
		status_set |= ST0_XX;
	change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
			 status_set);

	if (cpu_has_dsp)
		set_c0_status(ST0_MX);

#ifdef CONFIG_CPU_MIPSR2
	write_c0_hwrena(0x0000000f);	/* Allow rdhwr to all registers */
#endif

#ifdef CONFIG_MIPS_MT_SMTC
	if (!secondaryTC) {
#endif /* CONFIG_MIPS_MT_SMTC */

	if (cpu_has_veic || cpu_has_vint) {
		write_c0_ebase(ebase);
		/* Setting vector spacing enables EI/VI mode */
		change_c0_intctl(0x3e0, VECTORSPACING);
	}
	if (cpu_has_divec) {
		if (cpu_has_mipsmt) {
			unsigned int vpflags = dvpe();
			set_c0_cause(CAUSEF_IV);
			evpe(vpflags);
		} else
			set_c0_cause(CAUSEF_IV);
	}

	/*
	 * Before R2 both interrupt numbers were fixed to 7, so on R2 only:
	 *
	 *  o read IntCtl.IPTI to determine the timer interrupt
	 *  o read IntCtl.IPPCI to determine the performance counter interrupt
	 */
	if (cpu_has_mips_r2) {
		cp0_compare_irq = (read_c0_intctl() >> 29) & 7;
		cp0_perfcount_irq = (read_c0_intctl() >> 26) & 7;
		if (cp0_perfcount_irq == cp0_compare_irq)
			cp0_perfcount_irq = -1;
	} else {
		cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
		cp0_perfcount_irq = -1;
	}

#ifdef CONFIG_MIPS_MT_SMTC
	}
#endif /* CONFIG_MIPS_MT_SMTC */

	cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
	TLBMISS_HANDLER_SETUP();

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);

#ifdef CONFIG_MIPS_MT_SMTC
	if (bootTC) {
#endif /* CONFIG_MIPS_MT_SMTC */
		cpu_cache_init();
		tlb_init();
#ifdef CONFIG_MIPS_MT_SMTC
	} else if (!secondaryTC) {
		/*
		 * First TC in non-boot VPE must do subset of tlb_init()
		 * for MMU control registers.
		 */
		write_c0_pagemask(PM_DEFAULT_MASK);
		write_c0_wired(0);
	}
#endif /* CONFIG_MIPS_MT_SMTC */
}
/* Install CPU exception handler */
void __init set_handler(unsigned long offset, void *addr, unsigned long size)
{
	memcpy((void *)(ebase + offset), addr, size);
	flush_icache_range(ebase + offset, ebase + offset + size);
}

/* Install uncached CPU exception handler */
void __init set_uncached_handler(unsigned long offset, void *addr, unsigned long size)
{
#ifdef CONFIG_32BIT
	unsigned long uncached_ebase = KSEG1ADDR(ebase);
#endif
#ifdef CONFIG_64BIT
	unsigned long uncached_ebase = TO_UNCAC(ebase);
#endif

	memcpy((void *)(uncached_ebase + offset), addr, size);
}

static int __initdata rdhwr_noopt;
static int __init set_rdhwr_noopt(char *str)
{
	rdhwr_noopt = 1;
	return 1;
}

__setup("rdhwr_noopt", set_rdhwr_noopt);
void __init trap_init(void)
{
	extern char except_vec3_generic, except_vec3_r4000;
	extern char except_vec4;
	unsigned long i;

	if (cpu_has_veic || cpu_has_vint)
		ebase = (unsigned long) alloc_bootmem_low_pages(0x200 + VECTORSPACING*64);
	else
		ebase = CAC_BASE;

	mips_srs_init();

	per_cpu_trap_init();

	/*
	 * Copy the generic exception handlers to their final destination.
	 * This will be overridden later as suitable for a particular
	 * configuration.
	 */
	set_handler(0x180, &except_vec3_generic, 0x80);

	/*
	 * Setup default vectors
	 */
	for (i = 0; i <= 31; i++)
		set_except_vector(i, handle_reserved);

	/*
	 * Copy the EJTAG debug exception vector handler code to its final
	 * destination.
	 */
	if (cpu_has_ejtag && board_ejtag_handler_setup)
		board_ejtag_handler_setup();

	/*
	 * Only some CPUs have the watch exceptions.
	 */
	if (cpu_has_watch)
		set_except_vector(23, handle_watch);

	/*
	 * Initialise interrupt handlers
	 */
	if (cpu_has_veic || cpu_has_vint) {
		int nvec = cpu_has_veic ? 64 : 8;
		for (i = 0; i < nvec; i++)
			set_vi_handler(i, NULL);
	}
	else if (cpu_has_divec)
		set_handler(0x200, &except_vec4, 0x8);

	/*
	 * Some CPUs can enable/disable cache parity detection, but they
	 * do it in different ways.
	 */
	parity_protection_init();

	/*
	 * The Data Bus Errors / Instruction Bus Errors are signaled
	 * by external hardware. Therefore these two exceptions
	 * may have board specific handlers.
	 */
	if (board_be_init)
		board_be_init();
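
	/*
	 * Exception codes below: 0 interrupt, 1-3 TLB modified/load/store,
	 * 4/5 address error (load/store), 6/7 bus error (insn/data),
	 * 8 syscall, 9 breakpoint, 10 reserved instruction, 11 coprocessor
	 * unusable, 12 overflow, 13 trap.
	 */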
	set_except_vector(0, handle_int);
	set_except_vector(1, handle_tlbm);
	set_except_vector(2, handle_tlbl);
	set_except_vector(3, handle_tlbs);

	set_except_vector(4, handle_adel);
	set_except_vector(5, handle_ades);

	set_except_vector(6, handle_ibe);
	set_except_vector(7, handle_dbe);

	set_except_vector(8, handle_sys);
	set_except_vector(9, handle_bp);
	set_except_vector(10, rdhwr_noopt ? handle_ri :
			  (cpu_has_vtag_icache ?
			   handle_ri_rdhwr_vivt : handle_ri_rdhwr));
	set_except_vector(11, handle_cpu);
	set_except_vector(12, handle_ov);
	set_except_vector(13, handle_tr);

	if (current_cpu_data.cputype == CPU_R6000 ||
	    current_cpu_data.cputype == CPU_R6000A) {
		/*
		 * The R6000 is the only R-series CPU that features a machine
		 * check exception (similar to the R4000 cache error) and
		 * unaligned ldc1/sdc1 exception. The handlers have not been
		 * written yet. Well, anyway there is no R6000 machine on the
		 * current list of targets for Linux/MIPS.
		 * (Duh, crap, there is someone with a triple R6k machine)
		 */
		//set_except_vector(14, handle_mc);
		//set_except_vector(15, handle_ndc);
	}

	if (board_nmi_handler_setup)
		board_nmi_handler_setup();

	if (cpu_has_fpu && !cpu_has_nofpuex)
		set_except_vector(15, handle_fpe);

	set_except_vector(22, handle_mdmx);

	if (cpu_has_mcheck)
		set_except_vector(24, handle_mcheck);

	if (cpu_has_mipsmt)
		set_except_vector(25, handle_mt);

	set_except_vector(26, handle_dsp);

	if (cpu_has_vce)
		/* Special exception: R4[04]00 uses also the divec space. */
		memcpy((void *)(CAC_BASE + 0x180), &except_vec3_r4000, 0x100);
	else if (cpu_has_4kex)
		memcpy((void *)(CAC_BASE + 0x180), &except_vec3_generic, 0x80);
	else
		memcpy((void *)(CAC_BASE + 0x080), &except_vec3_generic, 0x80);

	signal_init();
#ifdef CONFIG_MIPS32_COMPAT
	signal32_init();
#endif

	flush_icache_range(ebase, ebase + 0x400);
	flush_tlb_handlers();
}