traps.c

/*
 * linux/arch/m32r/kernel/traps.c
 *
 * Copyright (C) 2001, 2002  Hirokazu Takata, Hiroyuki Kondo,
 *                           Hitoshi Yamamoto
 */

/*
 * 'traps.c' handles hardware traps and faults after we have saved some
 * state in 'entry.S'.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kallsyms.h>
#include <linux/stddef.h>
#include <linux/ptrace.h>
#include <linux/mm.h>

#include <asm/page.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/smp.h>

#include <linux/module.h>

asmlinkage void alignment_check(void);
asmlinkage void ei_handler(void);
asmlinkage void rie_handler(void);
asmlinkage void debug_trap(void);
asmlinkage void cache_flushing_handler(void);
asmlinkage void ill_trap(void);

#ifdef CONFIG_SMP
extern void smp_reschedule_interrupt(void);
extern void smp_invalidate_interrupt(void);
extern void smp_call_function_interrupt(void);
extern void smp_ipi_timer_interrupt(void);
extern void smp_flush_cache_all_interrupt(void);
extern void smp_call_function_single_interrupt(void);

/*
 * for Boot AP function
 */
asm (
        " .section .eit_vector4,\"ax\" \n"
        " .global _AP_RE \n"
        " .global startup_AP \n"
        "_AP_RE: \n"
        " .fill 32, 4, 0 \n"
        "_AP_EI: bra startup_AP \n"
        " .previous \n"
);
#endif /* CONFIG_SMP */

extern unsigned long eit_vector[];

#define BRA_INSN(func, entry) \
        ((unsigned long)func - (unsigned long)eit_vector - entry*4)/4 \
        + 0xff000000UL
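
/*
 * Illustrative note (not part of the original source): BRA_INSN() builds the
 * 32-bit word stored into an EIT vector slot.  Reading the macro itself,
 * 0xff000000UL supplies what the macro's name suggests is the opcode byte of
 * an M32R "bra" instruction, and the low 24 bits carry the pc-relative
 * displacement in 4-byte units:
 *
 *        word_disp = ((unsigned long)func - (unsigned long)eit_vector
 *                     - entry * 4) / 4;
 *        insn      = 0xff000000UL + word_disp;
 *
 * For example, eit_vector[8] = BRA_INSN(rie_handler, 8) plants a branch from
 * vector offset 0x20 (entry 8 * 4 bytes) to rie_handler.
 */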

static void set_eit_vector_entries(void)
{
        extern void default_eit_handler(void);
        extern void system_call(void);
        extern void pie_handler(void);
        extern void ace_handler(void);
        extern void tme_handler(void);
        extern void _flush_cache_copyback_all(void);

        eit_vector[0] = 0xd0c00001; /* seth r0, 0x01 */
        eit_vector[1] = BRA_INSN(default_eit_handler, 1);
        eit_vector[4] = 0xd0c00010; /* seth r0, 0x10 */
        eit_vector[5] = BRA_INSN(default_eit_handler, 5);
        eit_vector[8] = BRA_INSN(rie_handler, 8);
        eit_vector[12] = BRA_INSN(alignment_check, 12);
        eit_vector[16] = BRA_INSN(ill_trap, 16);
        eit_vector[17] = BRA_INSN(debug_trap, 17);
        eit_vector[18] = BRA_INSN(system_call, 18);
        eit_vector[19] = BRA_INSN(ill_trap, 19);
        eit_vector[20] = BRA_INSN(ill_trap, 20);
        eit_vector[21] = BRA_INSN(ill_trap, 21);
        eit_vector[22] = BRA_INSN(ill_trap, 22);
        eit_vector[23] = BRA_INSN(ill_trap, 23);
        eit_vector[24] = BRA_INSN(ill_trap, 24);
        eit_vector[25] = BRA_INSN(ill_trap, 25);
        eit_vector[26] = BRA_INSN(ill_trap, 26);
        eit_vector[27] = BRA_INSN(ill_trap, 27);
        eit_vector[28] = BRA_INSN(cache_flushing_handler, 28);
        eit_vector[29] = BRA_INSN(ill_trap, 29);
        eit_vector[30] = BRA_INSN(ill_trap, 30);
        eit_vector[31] = BRA_INSN(ill_trap, 31);
        eit_vector[32] = BRA_INSN(ei_handler, 32);
        eit_vector[64] = BRA_INSN(pie_handler, 64);
#ifdef CONFIG_MMU
        eit_vector[68] = BRA_INSN(ace_handler, 68);
        eit_vector[72] = BRA_INSN(tme_handler, 72);
#endif /* CONFIG_MMU */
#ifdef CONFIG_SMP
        eit_vector[184] = (unsigned long)smp_reschedule_interrupt;
        eit_vector[185] = (unsigned long)smp_invalidate_interrupt;
        eit_vector[186] = (unsigned long)smp_call_function_interrupt;
        eit_vector[187] = (unsigned long)smp_ipi_timer_interrupt;
        eit_vector[188] = (unsigned long)smp_flush_cache_all_interrupt;
        eit_vector[189] = (unsigned long)smp_call_function_single_interrupt;
        eit_vector[190] = 0;
        eit_vector[191] = 0;
#endif
        _flush_cache_copyback_all();
}

void __init trap_init(void)
{
        set_eit_vector_entries();

        /*
         * Should be a barrier for any external CPU state.
         */
        cpu_init();
}

static int kstack_depth_to_print = 24;

static void show_trace(struct task_struct *task, unsigned long *stack)
{
        unsigned long addr;

        if (!stack)
                stack = (unsigned long *)&stack;

        printk("Call Trace: ");
        while (!kstack_end(stack)) {
                addr = *stack++;
                if (__kernel_text_address(addr)) {
                        printk("[<%08lx>] ", addr);
                        print_symbol("%s\n", addr);
                }
        }
        printk("\n");
}

void show_stack(struct task_struct *task, unsigned long *sp)
{
        unsigned long *stack;
        int i;

        /*
         * debugging aid: "show_stack(NULL);" prints the
         * back trace for this cpu.
         */
        if (sp == NULL) {
                if (task)
                        sp = (unsigned long *)task->thread.sp;
                else
                        sp = (unsigned long *)&sp;
        }

        stack = sp;
        for (i = 0; i < kstack_depth_to_print; i++) {
                if (kstack_end(stack))
                        break;
                if (i && ((i % 4) == 0))
                        printk("\n ");
                printk("%08lx ", *stack++);
        }
        printk("\n");
        show_trace(task, sp);
}

void dump_stack(void)
{
        unsigned long stack;

        show_trace(current, &stack);
}
EXPORT_SYMBOL(dump_stack);
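
/*
 * Illustrative usage (not part of the original source): dump_stack() is the
 * generic "print a backtrace from here" helper, so kernel code on this
 * architecture can call it at a suspicious point, e.g. (hypothetical call
 * site):
 *
 *        if (unexpected_state)
 *                dump_stack();
 *
 * Passing the address of the local variable `stack` above simply gives
 * show_trace() a pointer into the current stack frame to start scanning from.
 */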

static void show_registers(struct pt_regs *regs)
{
        int i = 0;
        int in_kernel = 1;
        unsigned long sp;

        printk("CPU: %d\n", smp_processor_id());
        show_regs(regs);

        sp = (unsigned long) (1+regs);
        if (user_mode(regs)) {
                in_kernel = 0;
                sp = regs->spu;
                printk("SPU: %08lx\n", sp);
        } else {
                printk("SPI: %08lx\n", sp);
        }
        printk("Process %s (pid: %d, process nr: %d, stackpage=%08lx)",
                current->comm, task_pid_nr(current), 0xffff & i,
                4096 + (unsigned long)current);

        /*
         * When in-kernel, we also print out the stack and code at the
         * time of the fault..
         */
        if (in_kernel) {
                printk("\nStack: ");
                show_stack(current, (unsigned long *)sp);

                printk("\nCode: ");
                if (regs->bpc < PAGE_OFFSET)
                        goto bad;

                for (i = 0; i < 20; i++) {
                        unsigned char c;

                        if (__get_user(c, &((unsigned char *)regs->bpc)[i])) {
bad:
                                printk(" Bad PC value.");
                                break;
                        }
                        printk("%02x ", c);
                }
        }
        printk("\n");
}

static DEFINE_SPINLOCK(die_lock);

void die(const char *str, struct pt_regs *regs, long err)
{
        console_verbose();
        spin_lock_irq(&die_lock);
        bust_spinlocks(1);
        printk("%s: %04lx\n", str, err & 0xffff);
        show_registers(regs);
        bust_spinlocks(0);
        spin_unlock_irq(&die_lock);
        do_exit(SIGSEGV);
}

static __inline__ void die_if_kernel(const char *str,
        struct pt_regs *regs, long err)
{
        if (!user_mode(regs))
                die(str, regs, err);
}

static __inline__ void do_trap(int trapnr, int signr, const char *str,
        struct pt_regs *regs, long error_code, siginfo_t *info)
{
        if (user_mode(regs)) {
                /* trap_signal */
                struct task_struct *tsk = current;
                tsk->thread.error_code = error_code;
                tsk->thread.trap_no = trapnr;
                if (info)
                        force_sig_info(signr, info, tsk);
                else
                        force_sig(signr, tsk);
                return;
        } else {
                /* kernel_trap */
                if (!fixup_exception(regs))
                        die(str, regs, error_code);
                return;
        }
}

#define DO_ERROR(trapnr, signr, str, name) \
asmlinkage void do_##name(struct pt_regs *regs, long error_code) \
{ \
        do_trap(trapnr, signr, NULL, regs, error_code, NULL); \
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
asmlinkage void do_##name(struct pt_regs *regs, long error_code) \
{ \
        siginfo_t info; \
        info.si_signo = signr; \
        info.si_errno = 0; \
        info.si_code = sicode; \
        info.si_addr = (void __user *)siaddr; \
        do_trap(trapnr, signr, str, regs, error_code, &info); \
}

DO_ERROR( 1, SIGTRAP, "debug trap", debug_trap)
DO_ERROR_INFO(0x20, SIGILL, "reserved instruction ", rie_handler, ILL_ILLOPC, regs->bpc)
DO_ERROR_INFO(0x100, SIGILL, "privileged instruction", pie_handler, ILL_PRVOPC, regs->bpc)
DO_ERROR_INFO(-1, SIGILL, "illegal trap", ill_trap, ILL_ILLTRP, regs->bpc)
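
/*
 * Illustrative expansion (not part of the original source): each DO_ERROR*
 * invocation above generates one C-level trap entry point.  For example,
 * DO_ERROR( 1, SIGTRAP, "debug trap", debug_trap) expands to roughly:
 *
 *        asmlinkage void do_debug_trap(struct pt_regs *regs, long error_code)
 *        {
 *                do_trap(1, SIGTRAP, NULL, regs, error_code, NULL);
 *        }
 *
 * so a debug trap taken in user mode delivers SIGTRAP to the current task,
 * while one taken in kernel mode falls through to fixup_exception()/die()
 * inside do_trap().
 */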

extern int handle_unaligned_access(unsigned long, struct pt_regs *);

/* This code taken from arch/sh/kernel/traps.c */
asmlinkage void do_alignment_check(struct pt_regs *regs, long error_code)
{
        mm_segment_t oldfs;
        unsigned long insn;
        int tmp;

        oldfs = get_fs();

        if (user_mode(regs)) {
                local_irq_enable();
                current->thread.error_code = error_code;
                current->thread.trap_no = 0x17;

                set_fs(USER_DS);
                if (copy_from_user(&insn, (void *)regs->bpc, 4)) {
                        set_fs(oldfs);
                        goto uspace_segv;
                }
                tmp = handle_unaligned_access(insn, regs);
                set_fs(oldfs);

                if (!tmp)
                        return;

 uspace_segv:
                printk(KERN_NOTICE "Killing process \"%s\" due to unaligned "
                        "access\n", current->comm);
                force_sig(SIGSEGV, current);
        } else {
                set_fs(KERNEL_DS);
                if (copy_from_user(&insn, (void *)regs->bpc, 4)) {
                        set_fs(oldfs);
                        die("insn faulting in do_address_error", regs, 0);
                }
                handle_unaligned_access(insn, regs);
                set_fs(oldfs);
        }
}
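
/*
 * Illustrative note (not part of the original source): both branches above
 * use the same pattern to fetch the 32-bit instruction word at regs->bpc
 * before asking handle_unaligned_access() to emulate it: temporarily widen
 * the address limit with set_fs() so copy_from_user() may read from user
 * space (USER_DS) or kernel space (KERNEL_DS), then restore it, e.g.:
 *
 *        oldfs = get_fs();
 *        set_fs(KERNEL_DS);
 *        err = copy_from_user(&insn, (void *)regs->bpc, sizeof(insn));
 *        set_fs(oldfs);
 */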