/*
 * 'traps.c' handles hardware traps and faults after we have saved some
 * state in 'entry.S'.
 *
 * SuperH version: Copyright (C) 1999 Niibe Yutaka
 *                 Copyright (C) 2000 Philipp Rumpf
 *                 Copyright (C) 2000 David Howells
 *                 Copyright (C) 2002 - 2007 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/io.h>
#include <linux/bug.h>
#include <linux/debug_locks.h>
#include <linux/kdebug.h>
#include <linux/kexec.h>
#include <linux/limits.h>
#include <linux/string.h>	/* for memset() in do_divide_error() */
#include <asm/system.h>
#include <asm/uaccess.h>
#ifdef CONFIG_SH_KGDB
#include <asm/kgdb.h>
#define CHK_REMOTE_DEBUG(regs)			\
{						\
	if (kgdb_debug_hook && !user_mode(regs))\
		(*kgdb_debug_hook)(regs);	\
}
#else
#define CHK_REMOTE_DEBUG(regs)
#endif
#ifdef CONFIG_CPU_SH2
# define TRAP_RESERVED_INST	4
# define TRAP_ILLEGAL_SLOT_INST	6
# define TRAP_ADDRESS_ERROR	9
# ifdef CONFIG_CPU_SH2A
#  define TRAP_DIVZERO_ERROR	17
#  define TRAP_DIVOVF_ERROR	18
# endif
#else
#define TRAP_RESERVED_INST	12
#define TRAP_ILLEGAL_SLOT_INST	13
#endif
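
/*
 * The words below are fetched with __get_user(), so a read of an unmapped
 * address is caught by the exception tables and simply terminates the dump
 * early instead of oopsing.
 */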
static void dump_mem(const char *str, unsigned long bottom, unsigned long top)
{
	unsigned long p;
	int i;

	printk("%s(0x%08lx to 0x%08lx)\n", str, bottom, top);

	for (p = bottom & ~31; p < top; ) {
		printk("%04lx: ", p & 0xffff);

		for (i = 0; i < 8; i++, p += 4) {
			unsigned int val;

			if (p < bottom || p >= top)
				printk("         ");
			else {
				if (__get_user(val, (unsigned int __user *)p)) {
					printk("\n");
					return;
				}
				printk("%08x ", val);
			}
		}
		printk("\n");
	}
}
static DEFINE_SPINLOCK(die_lock);

void die(const char * str, struct pt_regs * regs, long err)
{
	static int die_counter;

	oops_enter();

	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);

	printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);

	CHK_REMOTE_DEBUG(regs);
	print_modules();
	show_regs(regs);

	printk("Process: %s (pid: %d, stack limit = %p)\n", current->comm,
	       task_pid_nr(current), task_stack_page(current) + 1);

	if (!user_mode(regs) || in_interrupt())
		dump_mem("Stack: ", regs->regs[15], THREAD_SIZE +
			 (unsigned long)task_stack_page(current));

	bust_spinlocks(0);
	add_taint(TAINT_DIE);
	spin_unlock_irq(&die_lock);

	if (kexec_should_crash(current))
		crash_kexec(regs);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	oops_exit();
	do_exit(SIGSEGV);
}
static inline void die_if_kernel(const char *str, struct pt_regs *regs,
				 long err)
{
	if (!user_mode(regs))
		die(str, regs, err);
}
/*
 * try and fix up kernelspace address errors
 * - userspace errors just cause EFAULT to be returned, resulting in SEGV
 * - kernel/userspace interfaces cause a jump to an appropriate handler
 * - other kernel errors are bad
 * - return 0 if fixed-up, -EFAULT if non-fatal (to the kernel) fault
 */
static int die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
{
	if (!user_mode(regs)) {
		const struct exception_table_entry *fixup;
		fixup = search_exception_tables(regs->pc);
		if (fixup) {
			regs->pc = fixup->fixup;
			return 0;
		}
		die(str, regs, err);
	}
	return -EFAULT;
}
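
/*
 * handle_unaligned_ins() below zero-fills the destination register before
 * copying in 1, 2 or 4 bytes, so sign_extend() only has to smear the sign
 * bit of the loaded byte/word across the upper bytes, mirroring the
 * sign-extending semantics of mov.b/mov.w loads.  Which bytes hold the
 * loaded value depends on byte order, hence the two variants.
 */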
static inline void sign_extend(unsigned int count, unsigned char *dst)
{
#ifdef __LITTLE_ENDIAN__
	if ((count == 1) && dst[0] & 0x80) {
		dst[1] = 0xff;
		dst[2] = 0xff;
		dst[3] = 0xff;
	}
	if ((count == 2) && dst[1] & 0x80) {
		dst[2] = 0xff;
		dst[3] = 0xff;
	}
#else
	if ((count == 1) && dst[3] & 0x80) {
		dst[2] = 0xff;
		dst[1] = 0xff;
		dst[0] = 0xff;
	}
	if ((count == 2) && dst[2] & 0x80) {
		dst[1] = 0xff;
		dst[0] = 0xff;
	}
#endif
}
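
/*
 * For userspace faults the emulated access is routed through the regular
 * copy_{from,to}_user() paths, so a bad address is reported as a fault
 * rather than an oops.
 */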
static struct mem_access user_mem_access = {
	copy_from_user,
	copy_to_user,
};
/*
 * handle an instruction that does an unaligned memory access by emulating the
 * desired behaviour
 * - note that PC _may not_ point to the faulting instruction
 *   (if that instruction is in a branch delay slot)
 * - return 0 if emulation okay, -EFAULT on existential error
 */
static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs,
				struct mem_access *ma)
{
	int ret, index, count;
	unsigned long *rm, *rn;
	unsigned char *src, *dst;

	index = (instruction>>8)&15;	/* 0x0F00 */
	rn = &regs->regs[index];

	index = (instruction>>4)&15;	/* 0x00F0 */
	rm = &regs->regs[index];

	count = 1<<(instruction&3);

	ret = -EFAULT;
	switch (instruction>>12) {
	case 0: /* mov.[bwl] to/from memory via r0+rn */
		if (instruction & 8) {
			/* from memory */
			src = (unsigned char*) *rm;
			src += regs->regs[0];
			dst = (unsigned char*) rn;
			*(unsigned long*)dst = 0;

#if !defined(__LITTLE_ENDIAN__)
			dst += 4-count;
#endif
			if (ma->from(dst, src, count))
				goto fetch_fault;

			sign_extend(count, dst);
		} else {
			/* to memory */
			src = (unsigned char*) rm;
#if !defined(__LITTLE_ENDIAN__)
			src += 4-count;
#endif
			dst = (unsigned char*) *rn;
			dst += regs->regs[0];

			if (ma->to(dst, src, count))
				goto fetch_fault;
		}
		ret = 0;
		break;

	case 1: /* mov.l Rm,@(disp,Rn) */
		src = (unsigned char*) rm;
		dst = (unsigned char*) *rn;
		dst += (instruction&0x000F)<<2;

		if (ma->to(dst, src, 4))
			goto fetch_fault;
		ret = 0;
		break;

	case 2: /* mov.[bwl] to memory, possibly with pre-decrement */
		if (instruction & 4)
			*rn -= count;
		src = (unsigned char*) rm;
		dst = (unsigned char*) *rn;
#if !defined(__LITTLE_ENDIAN__)
		src += 4-count;
#endif
		if (ma->to(dst, src, count))
			goto fetch_fault;
		ret = 0;
		break;

	case 5: /* mov.l @(disp,Rm),Rn */
		src = (unsigned char*) *rm;
		src += (instruction&0x000F)<<2;
		dst = (unsigned char*) rn;
		*(unsigned long*)dst = 0;

		if (ma->from(dst, src, 4))
			goto fetch_fault;
		ret = 0;
		break;

	case 6: /* mov.[bwl] from memory, possibly with post-increment */
		src = (unsigned char*) *rm;
		if (instruction & 4)
			*rm += count;
		dst = (unsigned char*) rn;
		*(unsigned long*)dst = 0;

#if !defined(__LITTLE_ENDIAN__)
		dst += 4-count;
#endif
		if (ma->from(dst, src, count))
			goto fetch_fault;
		sign_extend(count, dst);
		ret = 0;
		break;

	case 8:
		switch ((instruction&0xFF00)>>8) {
		case 0x81: /* mov.w R0,@(disp,Rn) */
			src = (unsigned char*) &regs->regs[0];
#if !defined(__LITTLE_ENDIAN__)
			src += 2;
#endif
			dst = (unsigned char*) *rm; /* called Rn in the spec */
			dst += (instruction&0x000F)<<1;

			if (ma->to(dst, src, 2))
				goto fetch_fault;
			ret = 0;
			break;

		case 0x85: /* mov.w @(disp,Rm),R0 */
			src = (unsigned char*) *rm;
			src += (instruction&0x000F)<<1;
			dst = (unsigned char*) &regs->regs[0];
			*(unsigned long*)dst = 0;

#if !defined(__LITTLE_ENDIAN__)
			dst += 2;
#endif
			if (ma->from(dst, src, 2))
				goto fetch_fault;
			sign_extend(2, dst);
			ret = 0;
			break;
		}
		break;
	}
	return ret;

 fetch_fault:
	/* Argh. Address not only misaligned but also non-existent.
	 * Raise an EFAULT and see if it's trapped
	 */
	return die_if_no_fixup("Fault in unaligned fixup", regs, 0);
}
/*
 * emulate the instruction in the delay slot
 * - fetches the instruction from PC+2
 */
static inline int handle_delayslot(struct pt_regs *regs,
				   opcode_t old_instruction,
				   struct mem_access *ma)
{
	opcode_t instruction;
	void *addr = (void *)(regs->pc + instruction_size(old_instruction));

	if (copy_from_user(&instruction, addr, sizeof(instruction))) {
		/* the instruction-fetch faulted */
		if (user_mode(regs))
			return -EFAULT;

		/* kernel */
		die("delay-slot-insn faulting in handle_unaligned_delayslot",
		    regs, 0);
	}

	return handle_unaligned_ins(instruction, regs, ma);
}
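
/*
 * Note that handle_delayslot() only emulates the access performed by the
 * instruction sitting in the slot; stepping PC on to the branch target is
 * left to the callers below.
 */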
/*
 * handle an instruction that does an unaligned memory access
 * - have to be careful of branch delay-slot instructions that fault
 *  SH3:
 *   - if the branch would be taken PC points to the branch
 *   - if the branch would not be taken, PC points to delay-slot
 *  SH4:
 *   - PC always points to delayed branch
 * - return 0 if handled, -EFAULT if failed (may not return if in kernel)
 */

/* Macros to determine offset from current PC for branch instructions */
/* Explicit type coercion is used to force sign extension where needed */
#define SH_PC_8BIT_OFFSET(instr) ((((signed char)(instr))*2) + 4)
#define SH_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4)
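
/*
 * Illustrative check of the 12-bit case: for "bra" the displacement is in
 * the low 12 bits, so instr = 0xA001 (disp = 1) gives
 * ((signed short)(0xA001<<4))>>3 = 0x0010>>3 = 2, plus 4 = 6 = disp*2+4,
 * and instr = 0xAFFE (disp = -2) gives ((signed short)0xFFE0)>>3 = -4,
 * plus 4 = 0 = disp*2+4.  The left shift moves the sign of the 12-bit
 * field into the sign bit of the short, so the arithmetic right shift
 * sign-extends while scaling.
 */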
/*
 * XXX: SH-2A needs this too, but it needs an overhaul thanks to mixed 32-bit
 * opcodes..
 */
static int handle_unaligned_notify_count = 10;
int handle_unaligned_access(opcode_t instruction, struct pt_regs *regs,
			    struct mem_access *ma)
{
	u_int rm;
	int ret, index;

	index = (instruction>>8)&15;	/* 0x0F00 */
	rm = regs->regs[index];

	/* shout about the first ten userspace fixups */
	if (user_mode(regs) && handle_unaligned_notify_count>0) {
		handle_unaligned_notify_count--;

		printk(KERN_NOTICE "Fixing up unaligned userspace access "
		       "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
		       current->comm, task_pid_nr(current),
		       (void *)regs->pc, instruction);
	}

	ret = -EFAULT;
	switch (instruction&0xF000) {
	case 0x0000:
		if (instruction==0x000B) {
			/* rts */
			ret = handle_delayslot(regs, instruction, ma);
			if (ret==0)
				regs->pc = regs->pr;
		}
		else if ((instruction&0x00FF)==0x0023) {
			/* braf @Rm */
			ret = handle_delayslot(regs, instruction, ma);
			if (ret==0)
				regs->pc += rm + 4;
		}
		else if ((instruction&0x00FF)==0x0003) {
			/* bsrf @Rm */
			ret = handle_delayslot(regs, instruction, ma);
			if (ret==0) {
				regs->pr = regs->pc + 4;
				regs->pc += rm + 4;
			}
		}
		else {
			/* mov.[bwl] to/from memory via r0+rn */
			goto simple;
		}
		break;

	case 0x1000: /* mov.l Rm,@(disp,Rn) */
		goto simple;

	case 0x2000: /* mov.[bwl] to memory, possibly with pre-decrement */
		goto simple;

	case 0x4000:
		if ((instruction&0x00FF)==0x002B) {
			/* jmp @Rm */
			ret = handle_delayslot(regs, instruction, ma);
			if (ret==0)
				regs->pc = rm;
		}
		else if ((instruction&0x00FF)==0x000B) {
			/* jsr @Rm */
			ret = handle_delayslot(regs, instruction, ma);
			if (ret==0) {
				regs->pr = regs->pc + 4;
				regs->pc = rm;
			}
		}
		else {
			/* mov.[bwl] to/from memory via r0+rn */
			goto simple;
		}
		break;

	case 0x5000: /* mov.l @(disp,Rm),Rn */
		goto simple;

	case 0x6000: /* mov.[bwl] from memory, possibly with post-increment */
		goto simple;

	case 0x8000: /* bf lab, bf/s lab, bt lab, bt/s lab */
		switch (instruction&0x0F00) {
		case 0x0100: /* mov.w R0,@(disp,Rm) */
			goto simple;
		case 0x0500: /* mov.w @(disp,Rm),R0 */
			goto simple;
		case 0x0B00: /* bf   lab - no delayslot */
			break;
		case 0x0F00: /* bf/s lab */
			ret = handle_delayslot(regs, instruction, ma);
			if (ret==0) {
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
				if ((regs->sr & 0x00000001) != 0)
					regs->pc += 4; /* next after slot */
				else
#endif
					regs->pc += SH_PC_8BIT_OFFSET(instruction);
			}
			break;
		case 0x0900: /* bt   lab - no delayslot */
			break;
		case 0x0D00: /* bt/s lab */
			ret = handle_delayslot(regs, instruction, ma);
			if (ret==0) {
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
				if ((regs->sr & 0x00000001) == 0)
					regs->pc += 4; /* next after slot */
				else
#endif
					regs->pc += SH_PC_8BIT_OFFSET(instruction);
			}
			break;
		}
		break;

	case 0xA000: /* bra label */
		ret = handle_delayslot(regs, instruction, ma);
		if (ret==0)
			regs->pc += SH_PC_12BIT_OFFSET(instruction);
		break;

	case 0xB000: /* bsr label */
		ret = handle_delayslot(regs, instruction, ma);
		if (ret==0) {
			regs->pr = regs->pc + 4;
			regs->pc += SH_PC_12BIT_OFFSET(instruction);
		}
		break;
	}
	return ret;

	/* handle non-delay-slot instruction */
 simple:
	ret = handle_unaligned_ins(instruction, regs, ma);
	if (ret==0)
		regs->pc += instruction_size(instruction);
	return ret;
}
#ifdef CONFIG_CPU_HAS_SR_RB
#define lookup_exception_vector(x)	\
	__asm__ __volatile__ ("stc r2_bank, %0\n\t" : "=r" ((x)))
#else
#define lookup_exception_vector(x)	\
	__asm__ __volatile__ ("mov r4, %0\n\t" : "=r" ((x)))
#endif
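
/*
 * Either variant reads back the exception event code stashed by the
 * low-level entry path ('entry.S'): from r2 of the alternate register
 * bank on CPUs with SR.RB banking, otherwise from r4, the first
 * argument register.
 */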
/*
 * Handle various address error exceptions:
 *  - instruction address error:
 *       misaligned PC
 *       PC >= 0x80000000 in user mode
 *  - data address error (read and write)
 *       misaligned data access
 *       access to >= 0x80000000 in user mode
 * Unfortunately we can't distinguish between instruction address error
 * and data address errors caused by read accesses.
 */
asmlinkage void do_address_error(struct pt_regs *regs,
				 unsigned long writeaccess,
				 unsigned long address)
{
	unsigned long error_code = 0;
	mm_segment_t oldfs;
	siginfo_t info;
	opcode_t instruction;
	int tmp;

	/* Intentional ifdef */
#ifdef CONFIG_CPU_HAS_SR_RB
	lookup_exception_vector(error_code);
#endif

	oldfs = get_fs();

	if (user_mode(regs)) {
		int si_code = BUS_ADRERR;

		local_irq_enable();

		/* bad PC is not something we can fix */
		if (regs->pc & 1) {
			si_code = BUS_ADRALN;
			goto uspace_segv;
		}
		set_fs(USER_DS);
		if (copy_from_user(&instruction, (void *)(regs->pc),
				   sizeof(instruction))) {
			/* Argh. Fault on the instruction itself.
			   This should never happen non-SMP
			 */
			set_fs(oldfs);
			goto uspace_segv;
		}

		tmp = handle_unaligned_access(instruction, regs,
					      &user_mem_access);
		set_fs(oldfs);

		if (tmp==0)
			return; /* sorted */
uspace_segv:
		printk(KERN_NOTICE "Sending SIGBUS to \"%s\" due to unaligned "
		       "access (PC %lx PR %lx)\n", current->comm, regs->pc,
		       regs->pr);

		info.si_signo = SIGBUS;
		info.si_errno = 0;
		info.si_code = si_code;
		info.si_addr = (void __user *)address;
		force_sig_info(SIGBUS, &info, current);
	} else {
		if (regs->pc & 1)
			die("unaligned program counter", regs, error_code);

		set_fs(KERNEL_DS);
		if (copy_from_user(&instruction, (void *)(regs->pc),
				   sizeof(instruction))) {
			/* Argh. Fault on the instruction itself.
			   This should never happen non-SMP
			 */
			set_fs(oldfs);
			die("insn faulting in do_address_error", regs, 0);
		}

		handle_unaligned_access(instruction, regs, &user_mem_access);
		set_fs(oldfs);
	}
}
#ifdef CONFIG_SH_DSP
/*
 *	SH-DSP support gerg@snapgear.com.
 */
int is_dsp_inst(struct pt_regs *regs)
{
	unsigned short inst = 0;

	/*
	 * Safe guard if DSP mode is already enabled or we're lacking
	 * the DSP altogether.
	 */
	if (!(current_cpu_data.flags & CPU_HAS_DSP) || (regs->sr & SR_DSP))
		return 0;

	get_user(inst, ((unsigned short *) regs->pc));

	inst &= 0xf000;

	/* Check for any type of DSP or support instruction */
	if ((inst == 0xf000) || (inst == 0x4000))
		return 1;

	return 0;
}
#else
#define is_dsp_inst(regs)	(0)
#endif /* CONFIG_SH_DSP */
#ifdef CONFIG_CPU_SH2A
asmlinkage void do_divide_error(unsigned long r4, unsigned long r5,
				unsigned long r6, unsigned long r7,
				struct pt_regs __regs)
{
	siginfo_t info;

	/* don't pass uninitialized stack garbage in the unused fields */
	memset(&info, 0, sizeof(info));
	info.si_signo = SIGFPE;

	switch (r4) {
	case TRAP_DIVZERO_ERROR:
		info.si_code = FPE_INTDIV;
		break;
	case TRAP_DIVOVF_ERROR:
		info.si_code = FPE_INTOVF;
		break;
	}

	force_sig_info(SIGFPE, &info, current);
}
#endif
asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5,
				 unsigned long r6, unsigned long r7,
				 struct pt_regs __regs)
{
	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
	unsigned long error_code;
	struct task_struct *tsk = current;

#ifdef CONFIG_SH_FPU_EMU
	unsigned short inst = 0;
	int err;

	get_user(inst, (unsigned short*)regs->pc);

	err = do_fpu_inst(inst, regs);
	if (!err) {
		regs->pc += instruction_size(inst);
		return;
	}
	/* not a FPU inst. */
#endif

#ifdef CONFIG_SH_DSP
	/* Check if it's a DSP instruction */
	if (is_dsp_inst(regs)) {
		/* Enable DSP mode, and restart instruction. */
		regs->sr |= SR_DSP;
		return;
	}
#endif

	lookup_exception_vector(error_code);

	local_irq_enable();
	CHK_REMOTE_DEBUG(regs);
	force_sig(SIGILL, tsk);
	die_if_no_fixup("reserved instruction", regs, error_code);
}
#ifdef CONFIG_SH_FPU_EMU
static int emulate_branch(unsigned short inst, struct pt_regs* regs)
{
	/*
	 * bfs: 8fxx: PC+=d*2+4;
	 * bts: 8dxx: PC+=d*2+4;
	 * bra: axxx: PC+=D*2+4;
	 * bsr: bxxx: PC+=D*2+4  after PR=PC+4;
	 * braf:0x23: PC+=Rn+4;
	 * bsrf:0x03: PC+=Rn+4   after PR=PC+4;
	 * jmp: 4x2b: PC=Rn;
	 * jsr: 4x0b: PC=Rn      after PR=PC+4;
	 * rts: 000b: PC=PR;
	 */
	if ((inst & 0xfd00) == 0x8d00) {	/* bfs, bts */
		regs->pc += SH_PC_8BIT_OFFSET(inst);
		return 0;
	}

	if ((inst & 0xe000) == 0xa000) {	/* bra, bsr */
		if ((inst & 0xf000) == 0xb000)	/* bsr: set the link register */
			regs->pr = regs->pc + 4;
		regs->pc += SH_PC_12BIT_OFFSET(inst);
		return 0;
	}

	if ((inst & 0xf0df) == 0x0003) {	/* braf, bsrf */
		if ((inst & 0xf0ff) == 0x0003)	/* bsrf: set the link register */
			regs->pr = regs->pc + 4;
		regs->pc += regs->regs[(inst & 0x0f00) >> 8] + 4;
		return 0;
	}

	if ((inst & 0xf0df) == 0x400b) {	/* jmp, jsr */
		if ((inst & 0xf0ff) == 0x400b)	/* jsr: set the link register */
			regs->pr = regs->pc + 4;
		regs->pc = regs->regs[(inst & 0x0f00) >> 8];
		return 0;
	}

	if ((inst & 0xffff) == 0x000b) {	/* rts */
		regs->pc = regs->pr;
		return 0;
	}

	return 1;
}
#endif
asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5,
				     unsigned long r6, unsigned long r7,
				     struct pt_regs __regs)
{
	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
	unsigned long error_code;
	struct task_struct *tsk = current;

#ifdef CONFIG_SH_FPU_EMU
	unsigned short inst = 0;

	get_user(inst, (unsigned short *)regs->pc + 1);
	if (!do_fpu_inst(inst, regs)) {
		get_user(inst, (unsigned short *)regs->pc);
		if (!emulate_branch(inst, regs))
			return;
		/* fault in branch.*/
	}
	/* not a FPU inst. */
#endif

	lookup_exception_vector(error_code);

	local_irq_enable();
	CHK_REMOTE_DEBUG(regs);
	force_sig(SIGILL, tsk);
	die_if_no_fixup("illegal slot instruction", regs, error_code);
}
asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
				   unsigned long r6, unsigned long r7,
				   struct pt_regs __regs)
{
	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
	long ex;

	lookup_exception_vector(ex);
	die_if_kernel("exception", regs, ex);
}
#if defined(CONFIG_SH_STANDARD_BIOS)
void *gdb_vbr_vector;

static inline void __init gdb_vbr_init(void)
{
	register unsigned long vbr;

	/*
	 * Read the old value of the VBR register to initialise
	 * the vector through which debug and BIOS traps are
	 * delegated by the Linux trap handler.
	 */
	asm volatile("stc vbr, %0" : "=r" (vbr));

	gdb_vbr_vector = (void *)(vbr + 0x100);
	printk("Setting GDB trap vector to 0x%08lx\n",
	       (unsigned long)gdb_vbr_vector);
}
#endif
void __cpuinit per_cpu_trap_init(void)
{
	extern void *vbr_base;

#ifdef CONFIG_SH_STANDARD_BIOS
	if (raw_smp_processor_id() == 0)
		gdb_vbr_init();
#endif

	/* NOTE: The VBR value should be at P1
	   (or P2, virtual "fixed" address space).
	   It definitely should not be a physical address. */
	asm volatile("ldc	%0, vbr"
		     : /* no output */
		     : "r" (&vbr_base)
		     : "memory");
}
void *set_exception_table_vec(unsigned int vec, void *handler)
{
	extern void *exception_handling_table[];
	void *old_handler;

	old_handler = exception_handling_table[vec];
	exception_handling_table[vec] = handler;
	return old_handler;
}
void __init trap_init(void)
{
	set_exception_table_vec(TRAP_RESERVED_INST, do_reserved_inst);
	set_exception_table_vec(TRAP_ILLEGAL_SLOT_INST, do_illegal_slot_inst);

#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SH_FPU) || \
    defined(CONFIG_SH_FPU_EMU)
	/*
	 * For SH-4 lacking an FPU, treat floating point instructions as
	 * reserved. They'll be handled in the math-emu case, or faulted on
	 * otherwise.
	 */
	set_exception_table_evt(0x800, do_reserved_inst);
	set_exception_table_evt(0x820, do_illegal_slot_inst);
#elif defined(CONFIG_SH_FPU)
#ifdef CONFIG_CPU_SUBTYPE_SHX3
	set_exception_table_evt(0xd80, fpu_state_restore_trap_handler);
	set_exception_table_evt(0xda0, fpu_state_restore_trap_handler);
#else
	set_exception_table_evt(0x800, fpu_state_restore_trap_handler);
	set_exception_table_evt(0x820, fpu_state_restore_trap_handler);
#endif
#endif

#ifdef CONFIG_CPU_SH2
	set_exception_table_vec(TRAP_ADDRESS_ERROR, address_error_trap_handler);
#endif
#ifdef CONFIG_CPU_SH2A
	set_exception_table_vec(TRAP_DIVZERO_ERROR, do_divide_error);
	set_exception_table_vec(TRAP_DIVOVF_ERROR, do_divide_error);
#endif

	/* Setup VBR for boot cpu */
	per_cpu_trap_init();
}
void show_trace(struct task_struct *tsk, unsigned long *sp,
		struct pt_regs *regs)
{
	unsigned long addr;

	if (regs && user_mode(regs))
		return;

	printk("\nCall trace: ");
#ifdef CONFIG_KALLSYMS
	printk("\n");
#endif

	while (!kstack_end(sp)) {
		addr = *sp++;
		if (kernel_text_address(addr))
			print_ip_sym(addr);
	}

	printk("\n");

	if (!tsk)
		tsk = current;

	debug_show_held_locks(tsk);
}
void show_stack(struct task_struct *tsk, unsigned long *sp)
{
	unsigned long stack;

	if (!tsk)
		tsk = current;
	if (tsk == current)
		sp = (unsigned long *)current_stack_pointer;
	else
		sp = (unsigned long *)tsk->thread.sp;

	stack = (unsigned long)sp;
	dump_mem("Stack: ", stack, THREAD_SIZE +
		 (unsigned long)task_stack_page(tsk));
	show_trace(tsk, sp, NULL);
}
void dump_stack(void)
{
	show_stack(NULL, NULL);
}
EXPORT_SYMBOL(dump_stack);