/* arch/x86/kernel/dumpstack_32.c — i386 stack dump and oops reporting */
  1. /*
  2. * Copyright (C) 1991, 1992 Linus Torvalds
  3. * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
  4. */
  5. #include <linux/kallsyms.h>
  6. #include <linux/kprobes.h>
  7. #include <linux/uaccess.h>
  8. #include <linux/utsname.h>
  9. #include <linux/hardirq.h>
  10. #include <linux/kdebug.h>
  11. #include <linux/module.h>
  12. #include <linux/ptrace.h>
  13. #include <linux/kexec.h>
  14. #include <linux/bug.h>
  15. #include <linux/nmi.h>
  16. #include <asm/stacktrace.h>
#define STACKSLOTS_PER_LINE 8	/* stack words printed per output line */
/* Read the current frame pointer (%ebp) into bp. */
#define get_bp(bp) asm("movl %%ebp, %0" : "=r" (bp) :)

int panic_on_unrecovered_nmi;
int kstack_depth_to_print = 3 * STACKSLOTS_PER_LINE;	/* "kstack=" boot param */
static unsigned int code_bytes = 64;	/* "code_bytes=" boot param, clamped to 8192 */
static int die_counter;			/* oops serial number shown as "[#N]" */
  23. void printk_address(unsigned long address, int reliable)
  24. {
  25. printk(" [<%p>] %s%pS\n", (void *) address,
  26. reliable ? "" : "? ", (void *) address);
  27. }
  28. static inline int valid_stack_ptr(struct thread_info *tinfo,
  29. void *p, unsigned int size, void *end)
  30. {
  31. void *t = tinfo;
  32. if (end) {
  33. if (p < end && p >= (end-THREAD_SIZE))
  34. return 1;
  35. else
  36. return 0;
  37. }
  38. return p > t && p < t + THREAD_SIZE - size;
  39. }
/* The form of the top of the frame on the stack */
struct stack_frame {
	struct stack_frame *next_frame;	/* saved %ebp of the caller's frame */
	unsigned long return_address;	/* saved return %eip */
};
/*
 * Scan one stack region word by word and report every kernel text
 * address through ops->address().  A word sitting exactly where the
 * frame-pointer chain expects a return address is reported as
 * reliable (1); any other text address is reported with reliability
 * "bp == 0" (i.e. treated as reliable only when no frame chain is
 * being followed at all).  Returns the bp reached at the end of the
 * region so the caller can continue on the next stack.
 */
static inline unsigned long
print_context_stack(struct thread_info *tinfo,
		unsigned long *stack, unsigned long bp,
		const struct stacktrace_ops *ops, void *data,
		unsigned long *end)
{
	struct stack_frame *frame = (struct stack_frame *)bp;

	while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
		unsigned long addr;

		addr = *stack;
		if (__kernel_text_address(addr)) {
			if ((unsigned long) stack == bp + sizeof(long)) {
				/* Return-address slot of the current frame. */
				ops->address(data, addr, 1);
				/* Advance to the caller's frame. */
				frame = frame->next_frame;
				bp = (unsigned long) frame;
			} else {
				ops->address(data, addr, bp == 0);
			}
		}
		stack++;
	}
	return bp;
}
/*
 * Walk the kernel stack(s) of @task (or of current when @task is NULL)
 * and feed each frame to @ops/@data.  Follows thread_info->previous_esp
 * to continue across nested stacks (labelled "IRQ") until no previous
 * stack is recorded or ops->stack() asks to stop.
 */
void dump_trace(struct task_struct *task, struct pt_regs *regs,
		unsigned long *stack, unsigned long bp,
		const struct stacktrace_ops *ops, void *data)
{
	if (!task)
		task = current;

	if (!stack) {
		/*
		 * No starting point given: use a local (it lives on our
		 * own stack), or the saved stack pointer of a sleeping task.
		 */
		unsigned long dummy;
		stack = &dummy;
		if (task && task != current)
			stack = (unsigned long *)task->thread.sp;
	}

#ifdef CONFIG_FRAME_POINTER
	if (!bp) {
		if (task == current) {
			/* Grab bp right from our regs */
			get_bp(bp);
		} else {
			/* bp is the last reg pushed by switch_to */
			bp = *(unsigned long *) task->thread.sp;
		}
	}
#endif

	for (;;) {
		struct thread_info *context;

		/* thread_info sits at the base of the THREAD_SIZE-aligned stack. */
		context = (struct thread_info *)
			((unsigned long)stack & (~(THREAD_SIZE - 1)));
		bp = print_context_stack(context, stack, bp, ops, data, NULL);

		/* Hop to the interrupted stack, if one was recorded. */
		stack = (unsigned long *)context->previous_esp;
		if (!stack)
			break;
		if (ops->stack(data, "IRQ") < 0)
			break;
		touch_nmi_watchdog();
	}
}
EXPORT_SYMBOL(dump_trace);
  105. static void
  106. print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
  107. {
  108. printk(data);
  109. print_symbol(msg, symbol);
  110. printk("\n");
  111. }
  112. static void print_trace_warning(void *data, char *msg)
  113. {
  114. printk("%s%s\n", (char *)data, msg);
  115. }
  116. static int print_trace_stack(void *data, char *name)
  117. {
  118. printk("%s <%s> ", (char *)data, name);
  119. return 0;
  120. }
  121. /*
  122. * Print one address/symbol entries per line.
  123. */
  124. static void print_trace_address(void *data, unsigned long addr, int reliable)
  125. {
  126. touch_nmi_watchdog();
  127. printk(data);
  128. printk_address(addr, reliable);
  129. }
/* Callbacks used by show_trace_log_lvl() through dump_trace(). */
static const struct stacktrace_ops print_trace_ops = {
	.warning = print_trace_warning,
	.warning_symbol = print_trace_warning_symbol,
	.stack = print_trace_stack,
	.address = print_trace_address,
};
  136. static void
  137. show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
  138. unsigned long *stack, unsigned long bp, char *log_lvl)
  139. {
  140. printk("%sCall Trace:\n", log_lvl);
  141. dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl);
  142. }
  143. void show_trace(struct task_struct *task, struct pt_regs *regs,
  144. unsigned long *stack, unsigned long bp)
  145. {
  146. show_trace_log_lvl(task, regs, stack, bp, "");
  147. }
  148. static void
  149. show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
  150. unsigned long *sp, unsigned long bp, char *log_lvl)
  151. {
  152. unsigned long *stack;
  153. int i;
  154. if (sp == NULL) {
  155. if (task)
  156. sp = (unsigned long *)task->thread.sp;
  157. else
  158. sp = (unsigned long *)&sp;
  159. }
  160. stack = sp;
  161. for (i = 0; i < kstack_depth_to_print; i++) {
  162. if (kstack_end(stack))
  163. break;
  164. if (i && ((i % STACKSLOTS_PER_LINE) == 0))
  165. printk("\n%s", log_lvl);
  166. printk(" %08lx", *stack++);
  167. touch_nmi_watchdog();
  168. }
  169. printk("\n");
  170. show_trace_log_lvl(task, regs, sp, bp, log_lvl);
  171. }
  172. void show_stack(struct task_struct *task, unsigned long *sp)
  173. {
  174. show_stack_log_lvl(task, NULL, sp, 0, "");
  175. }
/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
	unsigned long bp = 0;
	unsigned long stack;

#ifdef CONFIG_FRAME_POINTER
	if (!bp)
		get_bp(bp);	/* start the walk at our own frame */
#endif

	printk("Pid: %d, comm: %.20s %s %s %.*s\n",
		current->pid, current->comm, print_tainted(),
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);
	/* &stack is a cheap approximation of the current stack pointer. */
	show_trace(NULL, NULL, &stack, bp);
}
EXPORT_SYMBOL(dump_stack);
/*
 * Print modules, registers and process info; when the fault happened
 * in kernel mode, also dump the raw stack and a hexdump of the code
 * around the faulting instruction.
 */
void show_registers(struct pt_regs *regs)
{
	int i;

	print_modules();
	__show_regs(regs, 0);
	printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)\n",
		TASK_COMM_LEN, current->comm, task_pid_nr(current),
		current_thread_info(), current, task_thread_info(current));
	/*
	 * When in-kernel, we also print out the stack and code at the
	 * time of the fault..
	 */
	if (!user_mode_vm(regs)) {
		/* Dump ~2/3 of code_bytes before regs->ip, the rest after. */
		unsigned int code_prologue = code_bytes * 43 / 64;
		unsigned int code_len = code_bytes;
		unsigned char c;
		u8 *ip;

		printk(KERN_EMERG "Stack:\n");
		show_stack_log_lvl(NULL, regs, &regs->sp,
				0, KERN_EMERG);

		printk(KERN_EMERG "Code: ");

		ip = (u8 *)regs->ip - code_prologue;
		if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
			/* try starting at IP */
			ip = (u8 *)regs->ip;
			code_len = code_len - code_prologue + 1;
		}
		for (i = 0; i < code_len; i++, ip++) {
			/* Stop if the address becomes unreadable mid-dump. */
			if (ip < (u8 *)PAGE_OFFSET ||
					probe_kernel_address(ip, c)) {
				printk(" Bad EIP value.");
				break;
			}
			if (ip == (u8 *)regs->ip)
				printk("<%02x> ", c);	/* mark the faulting byte */
			else
				printk("%02x ", c);
		}
	}
	printk("\n");
}
  236. int is_valid_bugaddr(unsigned long ip)
  237. {
  238. unsigned short ud2;
  239. if (ip < PAGE_OFFSET)
  240. return 0;
  241. if (probe_kernel_address((unsigned short *)ip, ud2))
  242. return 0;
  243. return ud2 == 0x0b0f;
  244. }
/* Serializes oops output across CPUs; see oops_begin()/oops_end(). */
static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;		/* CPU currently holding die_lock */
static unsigned int die_nest_count;	/* recursive oops depth on that CPU */
/*
 * Enter oops processing: take die_lock with IRQs disabled, but allow
 * re-entry on the CPU that already owns it (a recursive oops only
 * bumps die_nest_count instead of deadlocking).  Returns the saved
 * IRQ flags to hand back to oops_end().
 */
unsigned __kprobes long oops_begin(void)
{
	unsigned long flags;

	oops_enter();
	if (die_owner != raw_smp_processor_id()) {
		console_verbose();
		raw_local_irq_save(flags);
		__raw_spin_lock(&die_lock);
		die_owner = smp_processor_id();
		die_nest_count = 0;
		bust_spinlocks(1);
	} else {
		/* Recursing on the owning CPU: don't retake the lock. */
		raw_local_irq_save(flags);
	}
	die_nest_count++;
	return flags;
}
/*
 * Counterpart of oops_begin(): release die_lock and restore IRQ
 * @flags.  If @regs is NULL the oops was already handled (e.g. a die
 * notifier claimed it) and we return; otherwise crash-kexec/panic as
 * configured and finally kill the task with @signr.
 */
void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
	bust_spinlocks(0);
	die_owner = -1;
	add_taint(TAINT_DIE);
	__raw_spin_unlock(&die_lock);
	raw_local_irq_restore(flags);

	if (!regs)
		return;

	if (kexec_should_crash(current))
		crash_kexec(regs);
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");
	oops_exit();
	do_exit(signr);
}
/*
 * Print the full oops report for @str/@err.  Returns 1 when a die
 * notifier claimed the event (the caller should suppress further
 * handling), 0 after a complete report.
 */
int __kprobes __die(const char *str, struct pt_regs *regs, long err)
{
	unsigned short ss;
	unsigned long sp;

	printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("DEBUG_PAGEALLOC");
#endif
	printk("\n");

	/* Give die notifiers a chance to claim the event. */
	if (notify_die(DIE_OOPS, str, regs, err,
			current->thread.trap_no, SIGSEGV) == NOTIFY_STOP)
		return 1;

	show_registers(regs);
	/* Executive summary in case the oops scrolled away */
	sp = (unsigned long) (&regs->sp);	/* kernel-mode fault: sp is where regs ends */
	savesegment(ss, ss);
	if (user_mode(regs)) {
		sp = regs->sp;
		ss = regs->ss & 0xffff;
	}
	printk(KERN_EMERG "EIP: [<%08lx>] ", regs->ip);
	print_symbol("%s", regs->ip);
	printk(" SS:ESP %04x:%08lx\n", ss, sp);
	return 0;
}
  314. /*
  315. * This is gone through when something in the kernel has done something bad
  316. * and is about to be terminated:
  317. */
  318. void die(const char *str, struct pt_regs *regs, long err)
  319. {
  320. unsigned long flags = oops_begin();
  321. if (die_nest_count < 3) {
  322. report_bug(regs->ip, regs);
  323. if (__die(str, regs, err))
  324. regs = NULL;
  325. } else {
  326. printk(KERN_EMERG "Recursive die() failure, output suppressed\n");
  327. }
  328. oops_end(flags, regs, SIGSEGV);
  329. }
static DEFINE_SPINLOCK(nmi_print_lock);

/*
 * Handle a fatal NMI (e.g. the NMI watchdog firing): dump registers
 * and either panic or kill the current task.  Does not return unless
 * a die notifier claims the event.
 */
void notrace __kprobes
die_nmi(char *str, struct pt_regs *regs, int do_panic)
{
	if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, 2, SIGINT) == NOTIFY_STOP)
		return;

	spin_lock(&nmi_print_lock);
	/*
	 * We are in trouble anyway, lets at least try
	 * to get a message out:
	 */
	bust_spinlocks(1);
	printk(KERN_EMERG "%s", str);
	printk(" on CPU%d, ip %08lx, registers:\n",
		smp_processor_id(), regs->ip);
	show_registers(regs);
	if (do_panic)
		panic("Non maskable interrupt");
	console_silent();
	spin_unlock(&nmi_print_lock);
	bust_spinlocks(0);
	/*
	 * If we are in kernel we are probably nested up pretty bad
	 * and might aswell get out now while we still can:
	 */
	if (!user_mode_vm(regs)) {
		current->thread.trap_no = 2;	/* 2 = NMI vector */
		crash_kexec(regs);
	}
	do_exit(SIGSEGV);
}
  361. static int __init oops_setup(char *s)
  362. {
  363. if (!s)
  364. return -EINVAL;
  365. if (!strcmp(s, "panic"))
  366. panic_on_oops = 1;
  367. return 0;
  368. }
  369. early_param("oops", oops_setup);
  370. static int __init kstack_setup(char *s)
  371. {
  372. if (!s)
  373. return -EINVAL;
  374. kstack_depth_to_print = simple_strtoul(s, NULL, 0);
  375. return 0;
  376. }
  377. early_param("kstack", kstack_setup);
  378. static int __init code_bytes_setup(char *s)
  379. {
  380. code_bytes = simple_strtoul(s, NULL, 0);
  381. if (code_bytes > 8192)
  382. code_bytes = 8192;
  383. return 1;
  384. }
  385. __setup("code_bytes=", code_bytes_setup);