dumpstack.c
  1. /*
  2. * Stack dumping functions
  3. *
  4. * Copyright IBM Corp. 1999, 2013
  5. */
  6. #include <linux/kallsyms.h>
  7. #include <linux/hardirq.h>
  8. #include <linux/kprobes.h>
  9. #include <linux/utsname.h>
  10. #include <linux/export.h>
  11. #include <linux/kdebug.h>
  12. #include <linux/ptrace.h>
  13. #include <linux/module.h>
  14. #include <linux/sched.h>
  15. #include <asm/processor.h>
  16. #include <asm/debug.h>
  17. #include <asm/ipl.h>
/*
 * Format helpers for raw stack dumps: LONG prints one stack word,
 * FOURLONG prints four per output line.  Hex width (8 vs 16 digits)
 * and the number of words dumped follow the native word size.
 */
#ifndef CONFIG_64BIT
#define LONG "%08lx "
#define FOURLONG "%08lx %08lx %08lx %08lx\n"
static int kstack_depth_to_print = 12;	/* stack words shown by show_stack() */
#else /* CONFIG_64BIT */
#define LONG "%016lx "
#define FOURLONG "%016lx %016lx %016lx %016lx\n"
static int kstack_depth_to_print = 20;	/* stack words shown by show_stack() */
#endif /* CONFIG_64BIT */
/*
 * For show_trace we have three different stacks to consider:
 * - the panic stack which is used if the kernel stack has overflown
 * - the asynchronous interrupt stack (cpu related)
 * - the synchronous kernel stack (process related)
 * The stack trace can start at any of the three stacks and can potentially
 * touch all of them. The order is: panic stack, async stack, sync stack.
 */
/*
 * Walk one stack area [low, high) starting at sp and print a backtrace.
 *
 * Returns the stack pointer at which the walk left this area, so the
 * caller can continue the trace on the next stack (panic -> async ->
 * sync, see the comment above).
 *
 * NOTE(review): the "%016lx" formats here are 16 digits wide even on
 * 31-bit (!CONFIG_64BIT) builds, unlike the LONG/FOURLONG macros —
 * presumably harmless zero-padding, but confirm against 31-bit output.
 */
static unsigned long
__show_trace(unsigned long sp, unsigned long low, unsigned long high)
{
	struct stack_frame *sf;
	struct pt_regs *regs;

	while (1) {
		sp = sp & PSW_ADDR_INSN;	/* strip PSW addressing-mode bits */
		/* Give up if the frame does not fit inside this stack area. */
		if (sp < low || sp > high - sizeof(*sf))
			return sp;
		sf = (struct stack_frame *) sp;
		/*
		 * sf->gprs[8] is printed as a code address (the call site);
		 * presumably the saved return register in the frame save
		 * area — confirm against struct stack_frame's layout.
		 */
		printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
		print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN);
		/* Follow the backchain. */
		while (1) {
			low = sp;	/* subsequent frames must lie above this one */
			sp = sf->back_chain & PSW_ADDR_INSN;
			if (!sp)
				break;	/* zero backchain ends the chain */
			if (sp <= low || sp > high - sizeof(*sf))
				return sp;
			sf = (struct stack_frame *) sp;
			printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
			print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN);
		}
		/* Zero backchain detected, check for interrupt frame. */
		sp = (unsigned long) (sf + 1);
		if (sp <= low || sp > high - sizeof(*regs))
			return sp;
		regs = (struct pt_regs *) sp;
		printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN);
		print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN);
		/* Resume the walk at the interrupted context's stack pointer. */
		low = sp;
		sp = regs->gprs[15];
	}
}
/*
 * Print a complete call trace for @task, chaining __show_trace() over
 * the panic, async and process stacks in that order (each pass returns
 * the sp where the trace left that stack).
 *
 * @task: task to trace, or NULL for the current context
 * @stack: starting stack pointer, or NULL to derive it from @task / %r15
 *
 * Note: __r15 is a GCC local register variable bound to %r15; it is
 * only read when neither an explicit stack nor a task is given.
 */
static void show_trace(struct task_struct *task, unsigned long *stack)
{
	register unsigned long __r15 asm ("15");
	unsigned long sp;

	sp = (unsigned long) stack;
	if (!sp)
		sp = task ? task->thread.ksp : __r15;
	printk("Call Trace:\n");
#ifdef CONFIG_CHECK_STACK
	/* Panic stack: the 4k page ending at S390_lowcore.panic_stack. */
	sp = __show_trace(sp, S390_lowcore.panic_stack - 4096,
			  S390_lowcore.panic_stack);
#endif
	sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE,
			  S390_lowcore.async_stack);
	if (task)
		__show_trace(sp, (unsigned long) task_stack_page(task),
			     (unsigned long) task_stack_page(task) + THREAD_SIZE);
	else
		__show_trace(sp, S390_lowcore.thread_info,
			     S390_lowcore.thread_info + THREAD_SIZE);
	if (!task)
		task = current;
	debug_show_held_locks(task);	/* lockdep: locks held by the task */
}
  94. void show_stack(struct task_struct *task, unsigned long *sp)
  95. {
  96. register unsigned long *__r15 asm ("15");
  97. unsigned long *stack;
  98. int i;
  99. if (!sp)
  100. stack = task ? (unsigned long *) task->thread.ksp : __r15;
  101. else
  102. stack = sp;
  103. for (i = 0; i < kstack_depth_to_print; i++) {
  104. if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
  105. break;
  106. if ((i * sizeof(long) % 32) == 0)
  107. printk("%s ", i == 0 ? "" : "\n");
  108. printk(LONG, *stack++);
  109. }
  110. printk("\n");
  111. show_trace(task, sp);
  112. }
  113. static void show_last_breaking_event(struct pt_regs *regs)
  114. {
  115. #ifdef CONFIG_64BIT
  116. printk("Last Breaking-Event-Address:\n");
  117. printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN);
  118. print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN);
  119. #endif
  120. }
  121. static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
  122. {
  123. return (regs->psw.mask & bits) / ((~bits + 1) & bits);
  124. }
/*
 * Print the PSW, its decoded mask fields and all 16 GPRs from @regs,
 * then dump the code around the PSW address via show_code().
 */
void show_registers(struct pt_regs *regs)
{
	char *mode;

	mode = user_mode(regs) ? "User" : "Krnl";
	printk("%s PSW : %p %p",
	       mode, (void *) regs->psw.mask,
	       (void *) regs->psw.addr);
	print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN);
	/* Decode each individual PSW mask field with mask_bits(). */
	printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
	       "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER),
	       mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO),
	       mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY),
	       mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT),
	       mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC),
	       mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM));
#ifdef CONFIG_64BIT
	printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA));
#endif
	printk("\n%s GPRS: " FOURLONG, mode,
	       regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
	printk(" " FOURLONG,
	       regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
	printk(" " FOURLONG,
	       regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
	printk(" " FOURLONG,
	       regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);
	show_code(regs);	/* disassemble around the PSW address */
}
  153. void show_regs(struct pt_regs *regs)
  154. {
  155. show_regs_print_info(KERN_DEFAULT);
  156. show_registers(regs);
  157. /* Show stack backtrace if pt_regs is from kernel mode */
  158. if (!user_mode(regs))
  159. show_trace(NULL, (unsigned long *) regs->gprs[15]);
  160. show_last_breaking_event(regs);
  161. }
static DEFINE_SPINLOCK(die_lock);

/*
 * die - terminate the current context after a fatal kernel exception.
 * @regs: register state at the time of the fault
 * @str: short description, printed as the oops banner
 *
 * Serialized by die_lock so concurrent oopses do not interleave their
 * output.  The sequence below (oops_enter, console_verbose, lock,
 * bust_spinlocks, dump, taint, unlock) is deliberate — do not reorder.
 * Does not return to the caller: ends in panic() or do_exit(SIGSEGV).
 */
void die(struct pt_regs *regs, const char *str)
{
	static int die_counter;	/* numbers successive oopses: "[#N]" */

	oops_enter();
	lgr_info_log();		/* s390-specific pre-oops logging, see asm/ipl.h */
	debug_stop_all();	/* stop s390 debug-feature tracing */
	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);
	/* Banner: "<str>: <low 16 bits of int_code> [#N] " plus config tags. */
	printk("%s: %04x [#%d] ", str, regs->int_code & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("DEBUG_PAGEALLOC");
#endif
	printk("\n");
	notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV);
	print_modules();
	show_regs(regs);
	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	spin_unlock_irq(&die_lock);
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception: panic_on_oops");
	oops_exit();
	do_exit(SIGSEGV);
}