/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle
 * Copyright (C) 1995, 1996 Paul M. Antoine
 * Copyright (C) 1998 Ulf Carlsson
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000, 01 MIPS Technologies, Inc.
 * Copyright (C) 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki
 */
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/bootmem.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/notifier.h>

#include <asm/bootinfo.h>
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/cop2.h>
#include <asm/cpu.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/fpu_emulator.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/module.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/tlbdebug.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
#include <asm/watch.h>
#include <asm/mmu_context.h>
#include <asm/types.h>
#include <asm/stacktrace.h>
#include <asm/irq.h>

extern void check_wait(void);
extern asmlinkage void r4k_wait(void);
extern asmlinkage void rollback_handle_int(void);
extern asmlinkage void handle_int(void);
extern asmlinkage void handle_tlbm(void);
extern asmlinkage void handle_tlbl(void);
extern asmlinkage void handle_tlbs(void);
extern asmlinkage void handle_adel(void);
extern asmlinkage void handle_ades(void);
extern asmlinkage void handle_ibe(void);
extern asmlinkage void handle_dbe(void);
extern asmlinkage void handle_sys(void);
extern asmlinkage void handle_bp(void);
extern asmlinkage void handle_ri(void);
extern asmlinkage void handle_ri_rdhwr_vivt(void);
extern asmlinkage void handle_ri_rdhwr(void);
extern asmlinkage void handle_cpu(void);
extern asmlinkage void handle_ov(void);
extern asmlinkage void handle_tr(void);
extern asmlinkage void handle_fpe(void);
extern asmlinkage void handle_mdmx(void);
extern asmlinkage void handle_watch(void);
extern asmlinkage void handle_mt(void);
extern asmlinkage void handle_dsp(void);
extern asmlinkage void handle_mcheck(void);
extern asmlinkage void handle_reserved(void);

extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
	struct mips_fpu_struct *ctx, int has_fpu);

void (*board_be_init)(void);
int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
void (*board_nmi_handler_setup)(void);
void (*board_ejtag_handler_setup)(void);
void (*board_bind_eic_interrupt)(int irq, int regset);
static void show_raw_backtrace(unsigned long reg29)
{
	unsigned long *sp = (unsigned long *)(reg29 & ~3);
	unsigned long addr;

	printk("Call Trace:");
#ifdef CONFIG_KALLSYMS
	printk("\n");
#endif
	while (!kstack_end(sp)) {
		unsigned long __user *p =
			(unsigned long __user *)(unsigned long)sp++;
		if (__get_user(addr, p)) {
			printk(" (Bad stack address)");
			break;
		}
		if (__kernel_text_address(addr))
			print_ip_sym(addr);
	}
	printk("\n");
}

#ifdef CONFIG_KALLSYMS
int raw_show_trace;
static int __init set_raw_show_trace(char *str)
{
	raw_show_trace = 1;
	return 1;
}
__setup("raw_show_trace", set_raw_show_trace);
#endif
static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
{
	unsigned long sp = regs->regs[29];
	unsigned long ra = regs->regs[31];
	unsigned long pc = regs->cp0_epc;

	if (raw_show_trace || !__kernel_text_address(pc)) {
		show_raw_backtrace(sp);
		return;
	}
	printk("Call Trace:\n");
	do {
		print_ip_sym(pc);
		pc = unwind_stack(task, &sp, pc, &ra);
	} while (pc);
	printk("\n");
}
/*
 * This routine abuses get_user()/put_user() to reference pointers
 * with at least a bit of error checking ...
 */
static void show_stacktrace(struct task_struct *task,
	const struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	long stackdata;
	int i;
	unsigned long __user *sp = (unsigned long __user *)regs->regs[29];

	printk("Stack :");
	i = 0;
	while ((unsigned long) sp & (PAGE_SIZE - 1)) {
		if (i && ((i % (64 / field)) == 0))
			printk("\n       ");
		if (i > 39) {
			printk(" ...");
			break;
		}

		if (__get_user(stackdata, sp++)) {
			printk(" (Bad stack address)");
			break;
		}

		printk(" %0*lx", field, stackdata);
		i++;
	}
	printk("\n");
	show_backtrace(task, regs);
}
void show_stack(struct task_struct *task, unsigned long *sp)
{
	struct pt_regs regs;

	if (sp) {
		regs.regs[29] = (unsigned long)sp;
		regs.regs[31] = 0;
		regs.cp0_epc = 0;
	} else {
		if (task && task != current) {
			regs.regs[29] = task->thread.reg29;
			regs.regs[31] = 0;
			regs.cp0_epc = task->thread.reg31;
		} else {
			prepare_frametrace(&regs);
		}
	}
	show_stacktrace(task, &regs);
}

/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
	struct pt_regs regs;

	prepare_frametrace(&regs);
	show_backtrace(current, &regs);
}

EXPORT_SYMBOL(dump_stack);
static void show_code(unsigned int __user *pc)
{
	long i;
	unsigned short __user *pc16 = NULL;

	printk("\nCode:");

	if ((unsigned long)pc & 1)
		pc16 = (unsigned short __user *)((unsigned long)pc & ~1);
	for(i = -3 ; i < 6 ; i++) {
		unsigned int insn;
		if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) {
			printk(" (Bad address in epc)\n");
			break;
		}
		printk("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
	}
}
static void __show_regs(const struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int cause = regs->cp0_cause;
	int i;

	printk("Cpu %d\n", smp_processor_id());

	/*
	 * Saved main processor registers
	 */
	for (i = 0; i < 32; ) {
		if ((i % 4) == 0)
			printk("$%2d   :", i);
		if (i == 0)
			printk(" %0*lx", field, 0UL);
		else if (i == 26 || i == 27)
			printk(" %*s", field, "");
		else
			printk(" %0*lx", field, regs->regs[i]);

		i++;
		if ((i % 4) == 0)
			printk("\n");
	}

#ifdef CONFIG_CPU_HAS_SMARTMIPS
	printk("Acx   : %0*lx\n", field, regs->acx);
#endif
	printk("Hi    : %0*lx\n", field, regs->hi);
	printk("Lo    : %0*lx\n", field, regs->lo);

	/*
	 * Saved cp0 registers
	 */
	printk("epc   : %0*lx %pS\n", field, regs->cp0_epc,
	       (void *) regs->cp0_epc);
	printk("    %s\n", print_tainted());
	printk("ra    : %0*lx %pS\n", field, regs->regs[31],
	       (void *) regs->regs[31]);

	printk("Status: %08x    ", (uint32_t) regs->cp0_status);

	if (current_cpu_data.isa_level == MIPS_CPU_ISA_I) {
		if (regs->cp0_status & ST0_KUO)
			printk("KUo ");
		if (regs->cp0_status & ST0_IEO)
			printk("IEo ");
		if (regs->cp0_status & ST0_KUP)
			printk("KUp ");
		if (regs->cp0_status & ST0_IEP)
			printk("IEp ");
		if (regs->cp0_status & ST0_KUC)
			printk("KUc ");
		if (regs->cp0_status & ST0_IEC)
			printk("IEc ");
	} else {
		if (regs->cp0_status & ST0_KX)
			printk("KX ");
		if (regs->cp0_status & ST0_SX)
			printk("SX ");
		if (regs->cp0_status & ST0_UX)
			printk("UX ");
		switch (regs->cp0_status & ST0_KSU) {
		case KSU_USER:
			printk("USER ");
			break;
		case KSU_SUPERVISOR:
			printk("SUPERVISOR ");
			break;
		case KSU_KERNEL:
			printk("KERNEL ");
			break;
		default:
			printk("BAD_MODE ");
			break;
		}
		if (regs->cp0_status & ST0_ERL)
			printk("ERL ");
		if (regs->cp0_status & ST0_EXL)
			printk("EXL ");
		if (regs->cp0_status & ST0_IE)
			printk("IE ");
	}
	printk("\n");

	printk("Cause : %08x\n", cause);

	cause = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
	if (1 <= cause && cause <= 5)
		printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);

	printk("PrId  : %08x (%s)\n", read_c0_prid(),
	       cpu_name_string());
}
/*
 * FIXME: really the generic show_regs should take a const pointer argument.
 */
void show_regs(struct pt_regs *regs)
{
	__show_regs((struct pt_regs *)regs);
}

void show_registers(const struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);

	__show_regs(regs);
	print_modules();
	printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
	       current->comm, current->pid, current_thread_info(), current,
	       field, current_thread_info()->tp_value);
	if (cpu_has_userlocal) {
		unsigned long tls;

		tls = read_c0_userlocal();
		if (tls != current_thread_info()->tp_value)
			printk("*HwTLS: %0*lx\n", field, tls);
	}

	show_stacktrace(current, regs);
	show_code((unsigned int __user *) regs->cp0_epc);
	printk("\n");
}
static DEFINE_SPINLOCK(die_lock);

void __noreturn die(const char * str, const struct pt_regs * regs)
{
	static int die_counter;
#ifdef CONFIG_MIPS_MT_SMTC
	unsigned long dvpret = dvpe();
#endif /* CONFIG_MIPS_MT_SMTC */

	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);
#ifdef CONFIG_MIPS_MT_SMTC
	mips_mt_regdump(dvpret);
#endif /* CONFIG_MIPS_MT_SMTC */

	printk("%s[#%d]:\n", str, ++die_counter);
	show_registers(regs);
	add_taint(TAINT_DIE);
	spin_unlock_irq(&die_lock);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops) {
		printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
		ssleep(5);
		panic("Fatal exception");
	}

	do_exit(SIGSEGV);
}
extern struct exception_table_entry __start___dbe_table[];
extern struct exception_table_entry __stop___dbe_table[];

__asm__(
"	.section	__dbe_table, \"a\"\n"
"	.previous			\n");

/* Given an address, look for it in the exception tables. */
static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
{
	const struct exception_table_entry *e;

	e = search_extable(__start___dbe_table, __stop___dbe_table - 1, addr);
	if (!e)
		e = search_module_dbetables(addr);
	return e;
}
asmlinkage void do_be(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	const struct exception_table_entry *fixup = NULL;
	int data = regs->cp0_cause & 4;
	int action = MIPS_BE_FATAL;

	/* XXX For now.  Fixme, this searches the wrong table ...  */
	if (data && !user_mode(regs))
		fixup = search_dbe_tables(exception_epc(regs));

	if (fixup)
		action = MIPS_BE_FIXUP;

	if (board_be_handler)
		action = board_be_handler(regs, fixup != NULL);

	switch (action) {
	case MIPS_BE_DISCARD:
		return;
	case MIPS_BE_FIXUP:
		if (fixup) {
			regs->cp0_epc = fixup->nextinsn;
			return;
		}
		break;
	default:
		break;
	}

	/*
	 * Assume it would be too dangerous to continue ...
	 */
	printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
	       data ? "Data" : "Instruction",
	       field, regs->cp0_epc, field, regs->regs[31]);
	if (notify_die(DIE_OOPS, "bus error", regs, SIGBUS, 0, 0)
	    == NOTIFY_STOP)
		return;

	die_if_kernel("Oops", regs);
	force_sig(SIGBUS, current);
}
/*
 * ll/sc, rdhwr, sync emulation
 */

#define OPCODE 0xfc000000
#define BASE   0x03e00000
#define RT     0x001f0000
#define OFFSET 0x0000ffff
#define LL     0xc0000000
#define SC     0xe0000000
#define SPEC0  0x00000000
#define SPEC3  0x7c000000
#define RD     0x0000f800
#define FUNC   0x0000003f
#define SYNC   0x0000000f
#define RDHWR  0x0000003b
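
/*
 * Illustrative decode, not used by the code below: ll and sc are I-type
 * instructions, so e.g. "ll $2, 4($3)" encodes as
 *
 *	opcode	base	rt	offset
 *	110000	00011	00010	0000000000000100	= 0xc0620004
 *
 * (insn & OPCODE) selects the instruction, BASE and RT extract the
 * register fields, and OFFSET is the 16-bit signed displacement.
 */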

/*
 * The ll_bit is cleared by r*_switch.S
 */

unsigned int ll_bit;
struct task_struct *ll_task;

static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode)
{
	unsigned long value, __user *vaddr;
	long offset;

	/*
	 * analyse the ll instruction that just caused a ri exception
	 * and store the referenced address in vaddr.
	 */

	/* sign extend offset */
	offset = opcode & OFFSET;
	offset <<= 16;
	offset >>= 16;

	vaddr = (unsigned long __user *)
	        ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);

	if ((unsigned long)vaddr & 3)
		return SIGBUS;
	if (get_user(value, vaddr))
		return SIGSEGV;

	preempt_disable();

	if (ll_task == NULL || ll_task == current) {
		ll_bit = 1;
	} else {
		ll_bit = 0;
	}
	ll_task = current;

	preempt_enable();

	regs->regs[(opcode & RT) >> 16] = value;

	return 0;
}
static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
{
	unsigned long __user *vaddr;
	unsigned long reg;
	long offset;

	/*
	 * analyse the sc instruction that just caused a ri exception
	 * and store the referenced address in vaddr.
	 */

	/* sign extend offset */
	offset = opcode & OFFSET;
	offset <<= 16;
	offset >>= 16;

	vaddr = (unsigned long __user *)
	        ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
	reg = (opcode & RT) >> 16;

	if ((unsigned long)vaddr & 3)
		return SIGBUS;

	preempt_disable();

	if (ll_bit == 0 || ll_task != current) {
		regs->regs[reg] = 0;
		preempt_enable();
		return 0;
	}

	preempt_enable();

	if (put_user(regs->regs[reg], vaddr))
		return SIGSEGV;

	regs->regs[reg] = 1;

	return 0;
}
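
/*
 * For context (illustrative only), the user-level idiom these helpers
 * emulate is the classic load-linked/store-conditional retry loop, e.g.
 * an atomic increment:
 *
 * 1:	ll	$t0, 0($a0)
 *	addiu	$t0, $t0, 1
 *	sc	$t0, 0($a0)
 *	beqz	$t0, 1b
 *
 * The ll_bit/ll_task pair above tracks the reservation: a context switch
 * between the ll and the sc (r*_switch.S clears ll_bit) makes the
 * emulated sc write 0 into rt instead of storing, so the loop retries.
 */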

/*
 * ll uses the opcode of lwc0 and sc uses the opcode of swc0.  That is, both
 * opcodes are supposed to result in coprocessor unusable exceptions if
 * executed on ll/sc-less processors.  That's the theory.  In practice a
 * few processors such as NEC's VR4100 throw reserved instruction exceptions
 * instead, so we're doing the emulation thing in both exception handlers.
 */
static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & OPCODE) == LL)
		return simulate_ll(regs, opcode);
	if ((opcode & OPCODE) == SC)
		return simulate_sc(regs, opcode);

	return -1;			/* Must be something else ... */
}
/*
 * Simulate trapping 'rdhwr' instructions to provide user accessible
 * registers not implemented in hardware.
 */
static int simulate_rdhwr(struct pt_regs *regs, unsigned int opcode)
{
	struct thread_info *ti = task_thread_info(current);

	if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
		int rd = (opcode & RD) >> 11;
		int rt = (opcode & RT) >> 16;
		switch (rd) {
		case 0:		/* CPU number */
			regs->regs[rt] = smp_processor_id();
			return 0;
		case 1:		/* SYNCI length */
			regs->regs[rt] = min(current_cpu_data.dcache.linesz,
					     current_cpu_data.icache.linesz);
			return 0;
		case 2:		/* Read count register */
			regs->regs[rt] = read_c0_count();
			return 0;
		case 3:		/* Count register resolution */
			switch (current_cpu_data.cputype) {
			case CPU_20KC:
			case CPU_25KF:
				regs->regs[rt] = 1;
				break;
			default:
				regs->regs[rt] = 2;
			}
			return 0;
		case 29:
			regs->regs[rt] = ti->tp_value;
			return 0;
		default:
			return -1;
		}
	}

	/* Not ours.  */
	return -1;
}
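
/*
 * The common real-world hit in simulate_rdhwr() is TLS: userland reads
 * the thread pointer with "rdhwr $3, $29" (0x7c03e83b -- SPEC3 opcode,
 * rt = 3, rd = 29, RDHWR function field; shown for illustration), which
 * traps on cores without a UserLocal register and is satisfied from
 * ti->tp_value by the rd == 29 case above.
 */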
static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC)
		return 0;

	return -1;			/* Must be something else ... */
}
asmlinkage void do_ov(struct pt_regs *regs)
{
	siginfo_t info;

	die_if_kernel("Integer overflow", regs);

	info.si_code = FPE_INTOVF;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_addr = (void __user *) regs->cp0_epc;
	force_sig_info(SIGFPE, &info, current);
}
/*
 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
 */
asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
{
	siginfo_t info;

	if (notify_die(DIE_FP, "FP exception", regs, SIGFPE, 0, 0)
	    == NOTIFY_STOP)
		return;
	die_if_kernel("FP exception in kernel code", regs);

	if (fcr31 & FPU_CSR_UNI_X) {
		int sig;

		/*
		 * Unimplemented operation exception.  If we've got the full
		 * software emulator on-board, let's use it...
		 *
		 * Force FPU to dump state into task/thread context.  We're
		 * moving a lot of data here for what is probably a single
		 * instruction, but the alternative is to pre-decode the FP
		 * register operands before invoking the emulator, which seems
		 * a bit extreme for what should be an infrequent event.
		 */
		/* Ensure 'resume' does not overwrite the saved fp context again.  */
		lose_fpu(1);

		/* Run the emulator */
		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1);

		/*
		 * We can't allow the emulated instruction to leave any of
		 * the cause bits set in $fcr31.
		 */
		current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;

		/* Restore the hardware register state */
		own_fpu(1);	/* Using the FPU again.  */

		/* If something went wrong, signal */
		if (sig)
			force_sig(sig, current);

		return;
	} else if (fcr31 & FPU_CSR_INV_X)
		info.si_code = FPE_FLTINV;
	else if (fcr31 & FPU_CSR_DIV_X)
		info.si_code = FPE_FLTDIV;
	else if (fcr31 & FPU_CSR_OVF_X)
		info.si_code = FPE_FLTOVF;
	else if (fcr31 & FPU_CSR_UDF_X)
		info.si_code = FPE_FLTUND;
	else if (fcr31 & FPU_CSR_INE_X)
		info.si_code = FPE_FLTRES;
	else
		info.si_code = __SI_FAULT;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_addr = (void __user *) regs->cp0_epc;
	force_sig_info(SIGFPE, &info, current);
}
static void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
	const char *str)
{
	siginfo_t info;
	char b[40];

	if (notify_die(DIE_TRAP, str, regs, code, 0, 0) == NOTIFY_STOP)
		return;

	/*
	 * A short test says that IRIX 5.3 sends SIGTRAP for all trap
	 * insns, even for trap and break codes that indicate arithmetic
	 * failures.  Weird ...
	 * But should we continue the brokenness???  --macro
	 */
	switch (code) {
	case BRK_OVERFLOW:
	case BRK_DIVZERO:
		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
		die_if_kernel(b, regs);
		if (code == BRK_DIVZERO)
			info.si_code = FPE_INTDIV;
		else
			info.si_code = FPE_INTOVF;
		info.si_signo = SIGFPE;
		info.si_errno = 0;
		info.si_addr = (void __user *) regs->cp0_epc;
		force_sig_info(SIGFPE, &info, current);
		break;
	case BRK_BUG:
		die_if_kernel("Kernel bug detected", regs);
		force_sig(SIGTRAP, current);
		break;
	case BRK_MEMU:
		/*
		 * Address errors may be deliberately induced by the FPU
		 * emulator to retake control of the CPU after executing the
		 * instruction in the delay slot of an emulated branch.
		 *
		 * Terminate if the exception was recognized as a delay slot
		 * return; otherwise handle as normal.
		 */
		if (do_dsemulret(regs))
			return;

		die_if_kernel("Math emu break/trap", regs);
		force_sig(SIGTRAP, current);
		break;
	default:
		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
		die_if_kernel(b, regs);
		force_sig(SIGTRAP, current);
	}
}
asmlinkage void do_bp(struct pt_regs *regs)
{
	unsigned int opcode, bcode;

	if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
		goto out_sigsegv;

	/*
	 * There is an ancient bug in MIPS assemblers: the break code is
	 * placed starting at bit 16 instead of bit 6 of the opcode.
	 * Gas is bug-compatible, but not always, grrr...
	 * We handle both cases with a simple heuristic, illustrated
	 * below.  --macro
	 */
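	/*
	 * Illustration: "break 7" encoded per the architecture puts 7 at
	 * bit 6, giving 0x000001cd, and the 20-bit extraction below yields
	 * bcode = 7 directly.  A bug-compatible assembler puts 7 at bit 16
	 * instead, giving 0x0007000d; extraction then yields 0x1c00, which
	 * is >= 1 << 10, so the extra shift by 10 recovers bcode = 7 too.
	 */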
	bcode = ((opcode >> 6) & ((1 << 20) - 1));
	if (bcode >= (1 << 10))
		bcode >>= 10;

	do_trap_or_bp(regs, bcode, "Break");
	return;

out_sigsegv:
	force_sig(SIGSEGV, current);
}
asmlinkage void do_tr(struct pt_regs *regs)
{
	unsigned int opcode, tcode = 0;

	if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
		goto out_sigsegv;

	/* Immediate versions don't provide a code.  */
	if (!(opcode & OPCODE))
		tcode = ((opcode >> 6) & ((1 << 10) - 1));

	do_trap_or_bp(regs, tcode, "Trap");
	return;

out_sigsegv:
	force_sig(SIGSEGV, current);
}
asmlinkage void do_ri(struct pt_regs *regs)
{
	unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
	unsigned long old_epc = regs->cp0_epc;
	unsigned int opcode = 0;
	int status = -1;

	if (notify_die(DIE_RI, "RI Fault", regs, SIGSEGV, 0, 0)
	    == NOTIFY_STOP)
		return;

	die_if_kernel("Reserved instruction in kernel code", regs);

	if (unlikely(compute_return_epc(regs) < 0))
		return;

	if (unlikely(get_user(opcode, epc) < 0))
		status = SIGSEGV;

	if (!cpu_has_llsc && status < 0)
		status = simulate_llsc(regs, opcode);

	if (status < 0)
		status = simulate_rdhwr(regs, opcode);

	if (status < 0)
		status = simulate_sync(regs, opcode);

	if (status < 0)
		status = SIGILL;

	if (unlikely(status > 0)) {
		regs->cp0_epc = old_epc;		/* Undo skip-over.  */
		force_sig(status, current);
	}
}
/*
 * MIPS MT processors may have fewer FPU contexts than CPU threads.  If we've
 * emulated more than some threshold number of instructions, force migration to
 * a "CPU" that has FP support.
 */
static void mt_ase_fp_affinity(void)
{
#ifdef CONFIG_MIPS_MT_FPAFF
	if (mt_fpemul_threshold > 0 &&
	    (current->thread.emulated_fp++ > mt_fpemul_threshold)) {
		/*
		 * If there's no FPU present, or if the application has already
		 * restricted the allowed set to exclude any CPUs with FPUs,
		 * we'll skip the procedure.
		 */
		if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) {
			cpumask_t tmask;

			current->thread.user_cpus_allowed
				= current->cpus_allowed;
			cpus_and(tmask, current->cpus_allowed,
				 mt_fpu_cpumask);
			set_cpus_allowed(current, tmask);
			set_thread_flag(TIF_FPUBOUND);
		}
	}
#endif /* CONFIG_MIPS_MT_FPAFF */
}
/*
 * No lock; only written during early bootup by CPU 0.
 */
static RAW_NOTIFIER_HEAD(cu2_chain);

int __ref register_cu2_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cu2_chain, nb);
}

int cu2_notifier_call_chain(unsigned long val, void *v)
{
	return raw_notifier_call_chain(&cu2_chain, val, v);
}

static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
	void *data)
{
	struct pt_regs *regs = data;

	switch (action) {
	default:
		die_if_kernel("Unhandled kernel unaligned access or invalid "
			      "instruction", regs);
		/* Fall through  */
	case CU2_EXCEPTION:
		force_sig(SIGILL, current);
	}

	return NOTIFY_OK;
}

static struct notifier_block default_cu2_notifier = {
	.notifier_call	= default_cu2_call,
	.priority	= 0x80000000,		/* Run last.  */
};
asmlinkage void do_cpu(struct pt_regs *regs)
{
	unsigned int __user *epc;
	unsigned long old_epc;
	unsigned int opcode;
	unsigned int cpid;
	int status;
	unsigned long __maybe_unused flags;

	die_if_kernel("do_cpu invoked from kernel context!", regs);

	cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;

	switch (cpid) {
	case 0:
		epc = (unsigned int __user *)exception_epc(regs);
		old_epc = regs->cp0_epc;
		opcode = 0;
		status = -1;

		if (unlikely(compute_return_epc(regs) < 0))
			return;

		if (unlikely(get_user(opcode, epc) < 0))
			status = SIGSEGV;

		if (!cpu_has_llsc && status < 0)
			status = simulate_llsc(regs, opcode);

		if (status < 0)
			status = simulate_rdhwr(regs, opcode);

		if (status < 0)
			status = SIGILL;

		if (unlikely(status > 0)) {
			regs->cp0_epc = old_epc;	/* Undo skip-over.  */
			force_sig(status, current);
		}

		return;

	case 1:
		if (used_math())	/* Using the FPU again.  */
			own_fpu(1);
		else {			/* First time FPU user.  */
			init_fpu();
			set_used_math();
		}

		if (!raw_cpu_has_fpu) {
			int sig;
			sig = fpu_emulator_cop1Handler(regs,
						       &current->thread.fpu, 0);
			if (sig)
				force_sig(sig, current);
			else
				mt_ase_fp_affinity();
		}

		return;

	case 2:
		raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
		break;

	case 3:
		break;
	}

	force_sig(SIGILL, current);
}

asmlinkage void do_mdmx(struct pt_regs *regs)
{
	force_sig(SIGILL, current);
}
/*
 * Called with interrupts disabled.
 */
asmlinkage void do_watch(struct pt_regs *regs)
{
	u32 cause;

	/*
	 * Clear WP (bit 22) of the cause register so we don't loop
	 * forever.
	 */
	cause = read_c0_cause();
	cause &= ~(1 << 22);
	write_c0_cause(cause);

	/*
	 * If the current thread has the watch registers loaded, save
	 * their values and send SIGTRAP.  Otherwise another thread
	 * left the registers set, clear them and continue.
	 */
	if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
		mips_read_watch_registers();
		local_irq_enable();
		force_sig(SIGTRAP, current);
	} else {
		mips_clear_watch_registers();
		local_irq_enable();
	}
}
asmlinkage void do_mcheck(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	int multi_match = regs->cp0_status & ST0_TS;

	show_regs(regs);

	if (multi_match) {
		printk("Index   : %0x\n", read_c0_index());
		printk("Pagemask: %0x\n", read_c0_pagemask());
		printk("EntryHi : %0*lx\n", field, read_c0_entryhi());
		printk("EntryLo0: %0*lx\n", field, read_c0_entrylo0());
		printk("EntryLo1: %0*lx\n", field, read_c0_entrylo1());
		printk("\n");
		dump_tlb_all();
	}

	show_code((unsigned int __user *) regs->cp0_epc);

	/*
	 * Some chips may have other causes of machine check (e.g. SB1
	 * graduation timer)
	 */
	panic("Caught Machine Check exception - %scaused by multiple "
	      "matching entries in the TLB.",
	      (multi_match) ? "" : "not ");
}
asmlinkage void do_mt(struct pt_regs *regs)
{
	int subcode;

	subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
			>> VPECONTROL_EXCPT_SHIFT;
	switch (subcode) {
	case 0:
		printk(KERN_DEBUG "Thread Underflow\n");
		break;
	case 1:
		printk(KERN_DEBUG "Thread Overflow\n");
		break;
	case 2:
		printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
		break;
	case 3:
		printk(KERN_DEBUG "Gating Storage Exception\n");
		break;
	case 4:
		printk(KERN_DEBUG "YIELD Scheduler Exception\n");
		break;
	case 5:
		printk(KERN_DEBUG "Gating Storage Scheduler Exception\n");
		break;
	default:
		printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
		       subcode);
		break;
	}
	die_if_kernel("MIPS MT Thread exception in kernel", regs);

	force_sig(SIGILL, current);
}
asmlinkage void do_dsp(struct pt_regs *regs)
{
	if (cpu_has_dsp)
		panic("Unexpected DSP exception\n");

	force_sig(SIGILL, current);
}

asmlinkage void do_reserved(struct pt_regs *regs)
{
	/*
	 * Game over - no way to handle this if it ever occurs.  Most probably
	 * caused by a new unknown cpu type or after another deadly
	 * hardware/software error.
	 */
	show_regs(regs);
	panic("Caught reserved exception %ld - should not happen.",
	      (regs->cp0_cause & 0x7f) >> 2);
}
static int __initdata l1parity = 1;
static int __init nol1parity(char *s)
{
	l1parity = 0;
	return 1;
}
__setup("nol1par", nol1parity);
static int __initdata l2parity = 1;
static int __init nol2parity(char *s)
{
	l2parity = 0;
	return 1;
}
__setup("nol2par", nol2parity);

/*
 * Some MIPS CPUs can enable/disable cache parity detection, but they
 * do it in different ways.
 */
static inline void parity_protection_init(void)
{
	switch (current_cpu_type()) {
	case CPU_24K:
	case CPU_34K:
	case CPU_74K:
	case CPU_1004K:
		{
#define ERRCTL_PE	0x80000000
#define ERRCTL_L2P	0x00800000
			unsigned long errctl;
			unsigned int l1parity_present, l2parity_present;

			errctl = read_c0_ecc();
			errctl &= ~(ERRCTL_PE|ERRCTL_L2P);

			/* probe L1 parity support */
			write_c0_ecc(errctl | ERRCTL_PE);
			back_to_back_c0_hazard();
			l1parity_present = (read_c0_ecc() & ERRCTL_PE);

			/* probe L2 parity support */
			write_c0_ecc(errctl|ERRCTL_L2P);
			back_to_back_c0_hazard();
			l2parity_present = (read_c0_ecc() & ERRCTL_L2P);

			if (l1parity_present && l2parity_present) {
				if (l1parity)
					errctl |= ERRCTL_PE;
				if (l1parity ^ l2parity)
					errctl |= ERRCTL_L2P;
			} else if (l1parity_present) {
				if (l1parity)
					errctl |= ERRCTL_PE;
			} else if (l2parity_present) {
				if (l2parity)
					errctl |= ERRCTL_L2P;
			} else {
				/* No parity available */
			}

			printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl);

			write_c0_ecc(errctl);
			back_to_back_c0_hazard();
			errctl = read_c0_ecc();
			printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl);

			if (l1parity_present)
				printk(KERN_INFO "Cache parity protection %sabled\n",
				       (errctl & ERRCTL_PE) ? "en" : "dis");

			if (l2parity_present) {
				if (l1parity_present && l1parity)
					errctl ^= ERRCTL_L2P;
				printk(KERN_INFO "L2 cache parity protection %sabled\n",
				       (errctl & ERRCTL_L2P) ? "en" : "dis");
			}
		}
		break;

	case CPU_5KC:
		write_c0_ecc(0x80000000);
		back_to_back_c0_hazard();
		/* Set the PE bit (bit 31) in the c0_errctl register. */
		printk(KERN_INFO "Cache parity protection %sabled\n",
		       (read_c0_ecc() & 0x80000000) ? "en" : "dis");
		break;
	case CPU_20KC:
	case CPU_25KF:
		/* Clear the DE bit (bit 16) in the c0_status register. */
		printk(KERN_INFO "Enable cache parity protection for "
		       "MIPS 20KC/25KF CPUs.\n");
		clear_c0_status(ST0_DE);
		break;
	default:
		break;
	}
}
asmlinkage void cache_parity_error(void)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int reg_val;

	/* For the moment, report the problem and hang. */
	printk("Cache error exception:\n");
	printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
	reg_val = read_c0_cacheerr();
	printk("c0_cacheerr == %08x\n", reg_val);

	printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
	       reg_val & (1<<30) ? "secondary" : "primary",
	       reg_val & (1<<31) ? "data" : "insn");
	printk("Error bits: %s%s%s%s%s%s%s\n",
	       reg_val & (1<<29) ? "ED " : "",
	       reg_val & (1<<28) ? "ET " : "",
	       reg_val & (1<<26) ? "EE " : "",
	       reg_val & (1<<25) ? "EB " : "",
	       reg_val & (1<<24) ? "EI " : "",
	       reg_val & (1<<23) ? "E1 " : "",
	       reg_val & (1<<22) ? "E0 " : "");
	printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));

#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
	if (reg_val & (1<<22))
		printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());

	if (reg_val & (1<<23))
		printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
#endif

	panic("Can't handle the cache error!");
}
/*
 * SDBBP EJTAG debug exception handler.
 * We skip the instruction and return to the next instruction.
 */
void ejtag_exception_handler(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned long depc, old_epc;
	unsigned int debug;

	printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
	depc = read_c0_depc();
	debug = read_c0_debug();
	printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
	if (debug & 0x80000000) {
		/*
		 * In branch delay slot.
		 * We cheat a little bit here and use EPC to calculate the
		 * debug return address (DEPC).  EPC is restored after the
		 * calculation.
		 */
		old_epc = regs->cp0_epc;
		regs->cp0_epc = depc;
		__compute_return_epc(regs);
		depc = regs->cp0_epc;
		regs->cp0_epc = old_epc;
	} else
		depc += 4;
	write_c0_depc(depc);

#if 0
	printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n");
	write_c0_debug(debug | 0x100);
#endif
}
/*
 * NMI exception handler.
 */
NORET_TYPE void ATTRIB_NORET nmi_exception_handler(struct pt_regs *regs)
{
	bust_spinlocks(1);
	printk("NMI taken!!!!\n");
	die("NMI", regs);
}
#define VECTORSPACING 0x100	/* for EI/VI mode */

unsigned long ebase;
unsigned long exception_handlers[32];
unsigned long vi_handlers[64];

/*
 * As a side effect of the way this is implemented we're limited
 * to interrupt handlers in the address range from
 * KSEG0 <= x < KSEG0 + 256MB on the Nevada.  Oh well ...
 */
void *set_except_vector(int n, void *addr)
{
	unsigned long handler = (unsigned long) addr;
	unsigned long old_handler = exception_handlers[n];

	exception_handlers[n] = handler;
	if (n == 0 && cpu_has_divec) {
		*(u32 *)(ebase + 0x200) = 0x08000000 |
					  (0x03ffffff & (handler >> 2));
		local_flush_icache_range(ebase + 0x200, ebase + 0x204);
	}
	return (void *)old_handler;
}
static asmlinkage void do_default_vi(void)
{
	show_regs(get_irq_regs());
	panic("Caught unexpected vectored interrupt.");
}
static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
{
	unsigned long handler;
	unsigned long old_handler = vi_handlers[n];
	int srssets = current_cpu_data.srsets;
	u32 *w;
	unsigned char *b;

	BUG_ON(!cpu_has_veic && !cpu_has_vint);

	if (addr == NULL) {
		handler = (unsigned long) do_default_vi;
		srs = 0;
	} else
		handler = (unsigned long) addr;
	vi_handlers[n] = (unsigned long) addr;

	b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);

	if (srs >= srssets)
		panic("Shadow register set %d not supported", srs);

	if (cpu_has_veic) {
		if (board_bind_eic_interrupt)
			board_bind_eic_interrupt(n, srs);
	} else if (cpu_has_vint) {
		/* SRSMap is only defined if shadow sets are implemented */
		if (srssets > 1)
			change_c0_srsmap(0xf << n*4, srs << n*4);
	}

	if (srs == 0) {
		/*
		 * If no shadow set is selected then use the default handler
		 * that does normal register saving and a standard interrupt exit
		 */
		extern char except_vec_vi, except_vec_vi_lui;
		extern char except_vec_vi_ori, except_vec_vi_end;
		extern char rollback_except_vec_vi;
		char *vec_start = (cpu_wait == r4k_wait) ?
			&rollback_except_vec_vi : &except_vec_vi;
#ifdef CONFIG_MIPS_MT_SMTC
		/*
		 * We need to provide the SMTC vectored interrupt handler
		 * not only with the address of the handler, but with the
		 * Status.IM bit to be masked before going there.
		 */
		extern char except_vec_vi_mori;
		const int mori_offset = &except_vec_vi_mori - vec_start;
#endif /* CONFIG_MIPS_MT_SMTC */
		const int handler_len = &except_vec_vi_end - vec_start;
		const int lui_offset = &except_vec_vi_lui - vec_start;
		const int ori_offset = &except_vec_vi_ori - vec_start;

		if (handler_len > VECTORSPACING) {
			/*
			 * Sigh... panicking won't help as the console
			 * is probably not configured :(
			 */
			panic("VECTORSPACING too small");
		}

		memcpy(b, vec_start, handler_len);
#ifdef CONFIG_MIPS_MT_SMTC
		BUG_ON(n > 7);	/* Vector index %d exceeds SMTC maximum. */

		w = (u32 *)(b + mori_offset);
		*w = (*w & 0xffff0000) | (0x100 << n);
#endif /* CONFIG_MIPS_MT_SMTC */
		/* Patch the handler address into the stub's lui/ori pair.  */
		w = (u32 *)(b + lui_offset);
		*w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff);
		w = (u32 *)(b + ori_offset);
		*w = (*w & 0xffff0000) | ((u32)handler & 0xffff);
		local_flush_icache_range((unsigned long)b,
					 (unsigned long)(b+handler_len));
	}
	else {
		/*
		 * In other cases jump directly to the interrupt handler.
		 *
		 * It is the handler's responsibility to save registers if
		 * required (e.g. hi/lo) and return from the exception using
		 * "eret".
		 */
		w = (u32 *)b;
		*w++ = 0x08000000 | (((u32)handler >> 2) & 0x03ffffff); /* j handler */
		*w = 0;
		local_flush_icache_range((unsigned long)b,
					 (unsigned long)(b+8));
	}

	return (void *)old_handler;
}
void *set_vi_handler(int n, vi_handler_t addr)
{
	return set_vi_srs_handler(n, addr, 0);
}

extern void cpu_cache_init(void);
extern void tlb_init(void);
extern void flush_tlb_handlers(void);

/*
 * Timer interrupt
 */
int cp0_compare_irq;

/*
 * Performance counter IRQ or -1 if shared with timer
 */
int cp0_perfcount_irq;
EXPORT_SYMBOL_GPL(cp0_perfcount_irq);

static int __cpuinitdata noulri;

static int __init ulri_disable(char *s)
{
	pr_info("Disabling ulri\n");
	noulri = 1;

	return 1;
}
__setup("noulri", ulri_disable);
void __cpuinit per_cpu_trap_init(void)
{
	unsigned int cpu = smp_processor_id();
	unsigned int status_set = ST0_CU0;
#ifdef CONFIG_MIPS_MT_SMTC
	int secondaryTC = 0;
	int bootTC = (cpu == 0);

	/*
	 * Only do per_cpu_trap_init() for first TC of each VPE.
	 * Note that this hack assumes that the SMTC init code
	 * assigns TCs consecutively and in ascending order.
	 */

	if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
	    ((read_c0_tcbind() & TCBIND_CURVPE) == cpu_data[cpu - 1].vpe_id))
		secondaryTC = 1;
#endif /* CONFIG_MIPS_MT_SMTC */

	/*
	 * Disable coprocessors and select 32-bit or 64-bit addressing
	 * and the 16/32 or 32/32 FPR register model.  Reset the BEV
	 * flag that some firmware may have left set and the TS bit (for
	 * IP27).  Set XX for ISA IV code to work.
	 */
#ifdef CONFIG_64BIT
	status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
#endif
	if (current_cpu_data.isa_level == MIPS_CPU_ISA_IV)
		status_set |= ST0_XX;
	if (cpu_has_dsp)
		status_set |= ST0_MX;

	change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
			 status_set);

	if (cpu_has_mips_r2) {
		unsigned int enable = 0x0000000f | cpu_hwrena_impl_bits;

		if (!noulri && cpu_has_userlocal)
			enable |= (1 << 29);	/* HWREna.ULR: user rdhwr of UserLocal */

		write_c0_hwrena(enable);
	}

#ifdef CONFIG_MIPS_MT_SMTC
	if (!secondaryTC) {
#endif /* CONFIG_MIPS_MT_SMTC */

	if (cpu_has_veic || cpu_has_vint) {
		unsigned long sr = set_c0_status(ST0_BEV);
		write_c0_ebase(ebase);
		write_c0_status(sr);
		/* Setting vector spacing enables EI/VI mode  */
		change_c0_intctl(0x3e0, VECTORSPACING);
	}
	if (cpu_has_divec) {
		if (cpu_has_mipsmt) {
			unsigned int vpflags = dvpe();
			set_c0_cause(CAUSEF_IV);
			evpe(vpflags);
		} else
			set_c0_cause(CAUSEF_IV);
	}

	/*
	 * Before R2 both interrupt numbers were fixed to 7, so on R2 only:
	 *
	 *  o read IntCtl.IPTI to determine the timer interrupt
	 *  o read IntCtl.IPPCI to determine the performance counter interrupt
	 */
	if (cpu_has_mips_r2) {
		cp0_compare_irq = (read_c0_intctl() >> 29) & 7;
		cp0_perfcount_irq = (read_c0_intctl() >> 26) & 7;
		if (cp0_perfcount_irq == cp0_compare_irq)
			cp0_perfcount_irq = -1;
	} else {
		cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
		cp0_perfcount_irq = -1;
	}

#ifdef CONFIG_MIPS_MT_SMTC
	}
#endif /* CONFIG_MIPS_MT_SMTC */

	cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
	TLBMISS_HANDLER_SETUP();

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);

#ifdef CONFIG_MIPS_MT_SMTC
	if (bootTC) {
#endif /* CONFIG_MIPS_MT_SMTC */
		cpu_cache_init();
		tlb_init();
#ifdef CONFIG_MIPS_MT_SMTC
	} else if (!secondaryTC) {
		/*
		 * First TC in non-boot VPE must do subset of tlb_init()
		 * for MMU control registers.
		 */
		write_c0_pagemask(PM_DEFAULT_MASK);
		write_c0_wired(0);
	}
#endif /* CONFIG_MIPS_MT_SMTC */
}
/* Install CPU exception handler */
void __init set_handler(unsigned long offset, void *addr, unsigned long size)
{
	memcpy((void *)(ebase + offset), addr, size);
	local_flush_icache_range(ebase + offset, ebase + offset + size);
}

static char panic_null_cerr[] __cpuinitdata =
	"Trying to set NULL cache error exception handler";

/*
 * Install uncached CPU exception handler.
 * This is suitable only for the cache error exception which is the only
 * exception handler that is being run uncached.
 */
void __cpuinit set_uncached_handler(unsigned long offset, void *addr,
	unsigned long size)
{
#ifdef CONFIG_32BIT
	unsigned long uncached_ebase = KSEG1ADDR(ebase);
#endif
#ifdef CONFIG_64BIT
	unsigned long uncached_ebase = TO_UNCAC(ebase);
#endif

	if (!addr)
		panic(panic_null_cerr);

	memcpy((void *)(uncached_ebase + offset), addr, size);
}

static int __initdata rdhwr_noopt;
static int __init set_rdhwr_noopt(char *str)
{
	rdhwr_noopt = 1;
	return 1;
}
__setup("rdhwr_noopt", set_rdhwr_noopt);
void __init trap_init(void)
{
	extern char except_vec3_generic, except_vec3_r4000;
	extern char except_vec4;
	unsigned long i;
	int rollback;

	check_wait();
	rollback = (cpu_wait == r4k_wait);

#if defined(CONFIG_KGDB)
	if (kgdb_early_setup)
		return;	/* Already done */
#endif

	if (cpu_has_veic || cpu_has_vint) {
		unsigned long size = 0x200 + VECTORSPACING*64;
		ebase = (unsigned long)
			__alloc_bootmem(size, 1 << fls(size), 0);
	} else {
		ebase = CAC_BASE;
		if (cpu_has_mips_r2)
			ebase += (read_c0_ebase() & 0x3ffff000);
	}

	per_cpu_trap_init();

	/*
	 * Copy the generic exception handlers to their final destination.
	 * This will be overridden later as suitable for a particular
	 * configuration.
	 */
	set_handler(0x180, &except_vec3_generic, 0x80);

	/*
	 * Setup default vectors
	 */
	for (i = 0; i <= 31; i++)
		set_except_vector(i, handle_reserved);

	/*
	 * Copy the EJTAG debug exception vector handler code to its final
	 * destination.
	 */
	if (cpu_has_ejtag && board_ejtag_handler_setup)
		board_ejtag_handler_setup();

	/*
	 * Only some CPUs have the watch exceptions.
	 */
	if (cpu_has_watch)
		set_except_vector(23, handle_watch);

	/*
	 * Initialise interrupt handlers
	 */
	if (cpu_has_veic || cpu_has_vint) {
		int nvec = cpu_has_veic ? 64 : 8;
		for (i = 0; i < nvec; i++)
			set_vi_handler(i, NULL);
	}
	else if (cpu_has_divec)
		set_handler(0x200, &except_vec4, 0x8);

	/*
	 * Some CPUs can enable/disable cache parity detection, but do
	 * it in different ways.
	 */
	parity_protection_init();

	/*
	 * The Data Bus Errors / Instruction Bus Errors are signaled
	 * by external hardware.  Therefore these two exceptions
	 * may have board specific handlers.
	 */
	if (board_be_init)
		board_be_init();

	set_except_vector(0, rollback ? rollback_handle_int : handle_int);
	set_except_vector(1, handle_tlbm);
	set_except_vector(2, handle_tlbl);
	set_except_vector(3, handle_tlbs);

	set_except_vector(4, handle_adel);
	set_except_vector(5, handle_ades);

	set_except_vector(6, handle_ibe);
	set_except_vector(7, handle_dbe);

	set_except_vector(8, handle_sys);
	set_except_vector(9, handle_bp);
	set_except_vector(10, rdhwr_noopt ? handle_ri :
			  (cpu_has_vtag_icache ?
			   handle_ri_rdhwr_vivt : handle_ri_rdhwr));
	set_except_vector(11, handle_cpu);
	set_except_vector(12, handle_ov);
	set_except_vector(13, handle_tr);

	if (current_cpu_type() == CPU_R6000 ||
	    current_cpu_type() == CPU_R6000A) {
		/*
		 * The R6000 is the only R-series CPU that features a machine
		 * check exception (similar to the R4000 cache error) and
		 * unaligned ldc1/sdc1 exception.  The handlers have not been
		 * written yet.  Well, anyway there is no R6000 machine on the
		 * current list of targets for Linux/MIPS.
		 * (Duh, crap, there is someone with a triple R6k machine)
		 */
		//set_except_vector(14, handle_mc);
		//set_except_vector(15, handle_ndc);
	}

	if (board_nmi_handler_setup)
		board_nmi_handler_setup();

	if (cpu_has_fpu && !cpu_has_nofpuex)
		set_except_vector(15, handle_fpe);

	set_except_vector(22, handle_mdmx);

	if (cpu_has_mcheck)
		set_except_vector(24, handle_mcheck);

	if (cpu_has_mipsmt)
		set_except_vector(25, handle_mt);

	set_except_vector(26, handle_dsp);

	if (cpu_has_vce)
		/* Special exception: R4[04]00 uses also the divec space. */
		memcpy((void *)(ebase + 0x180), &except_vec3_r4000, 0x100);
	else if (cpu_has_4kex)
		memcpy((void *)(ebase + 0x180), &except_vec3_generic, 0x80);
	else
		memcpy((void *)(ebase + 0x080), &except_vec3_generic, 0x80);

	local_flush_icache_range(ebase, ebase + 0x400);
	flush_tlb_handlers();

	sort_extable(__start___dbe_table, __stop___dbe_table);

	register_cu2_notifier(&default_cu2_notifier);
}