dumpstack.c

/*
 * Stack dumping functions
 *
 * Copyright IBM Corp. 1999, 2013
 */
#include <linux/kallsyms.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/utsname.h>
#include <linux/export.h>
#include <linux/kdebug.h>
#include <linux/ptrace.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/debug.h>
#include <asm/ipl.h>
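
/*
 * Register/stack word format and the default number of raw stack words to
 * dump depend on whether this is a 31-bit or a 64-bit kernel.
 */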
#ifndef CONFIG_64BIT
#define LONG "%08lx "
#define FOURLONG "%08lx %08lx %08lx %08lx\n"
static int kstack_depth_to_print = 12;
#else /* CONFIG_64BIT */
#define LONG "%016lx "
#define FOURLONG "%016lx %016lx %016lx %016lx\n"
static int kstack_depth_to_print = 20;
#endif /* CONFIG_64BIT */

/*
 * For show_trace we have three different stacks to consider:
 *   - the panic stack which is used if the kernel stack has overflowed
 *   - the asynchronous interrupt stack (cpu related)
 *   - the synchronous kernel stack (process related)
 * The stack trace can start at any of the three stacks and can potentially
 * touch all of them. The order is: panic stack, async stack, sync stack.
 */
static unsigned long
__show_trace(unsigned long sp, unsigned long low, unsigned long high)
{
	struct stack_frame *sf;
	struct pt_regs *regs;
	unsigned long addr;

	while (1) {
		sp = sp & PSW_ADDR_INSN;
		if (sp < low || sp > high - sizeof(*sf))
			return sp;
		sf = (struct stack_frame *) sp;
		addr = sf->gprs[8] & PSW_ADDR_INSN;
		printk("([<%016lx>] %pSR)\n", addr, (void *)addr);
		/* Follow the backchain. */
		while (1) {
			low = sp;
			sp = sf->back_chain & PSW_ADDR_INSN;
			if (!sp)
				break;
			if (sp <= low || sp > high - sizeof(*sf))
				return sp;
			sf = (struct stack_frame *) sp;
			addr = sf->gprs[8] & PSW_ADDR_INSN;
			printk(" [<%016lx>] %pSR\n", addr, (void *)addr);
		}
		/* Zero backchain detected, check for interrupt frame. */
		sp = (unsigned long) (sf + 1);
		if (sp <= low || sp > high - sizeof(*regs))
			return sp;
		regs = (struct pt_regs *) sp;
		addr = regs->psw.addr & PSW_ADDR_INSN;
		printk(" [<%016lx>] %pSR\n", addr, (void *)addr);
		low = sp;
		sp = regs->gprs[15];
	}
}
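
/*
 * show_trace() prints the call chain by walking stack frames on the panic,
 * async and process stacks in turn (see the comment above __show_trace()).
 * The walk relies on the s390 stack frame layout: the back chain pointer at
 * the start of each frame, and gprs[8] in the register save area holding
 * the saved r14, i.e. the return address that gets resolved to a symbol.
 */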
static void show_trace(struct task_struct *task, unsigned long *stack)
{
	const unsigned long frame_size =
		STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
	register unsigned long __r15 asm ("15");
	unsigned long sp;

	sp = (unsigned long) stack;
	if (!sp)
		sp = task ? task->thread.ksp : __r15;
	printk("Call Trace:\n");
#ifdef CONFIG_CHECK_STACK
	sp = __show_trace(sp,
			  S390_lowcore.panic_stack + frame_size - 4096,
			  S390_lowcore.panic_stack + frame_size);
#endif
	sp = __show_trace(sp,
			  S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
			  S390_lowcore.async_stack + frame_size);
	if (task)
		__show_trace(sp, (unsigned long) task_stack_page(task),
			     (unsigned long) task_stack_page(task) + THREAD_SIZE);
	else
		__show_trace(sp, S390_lowcore.thread_info,
			     S390_lowcore.thread_info + THREAD_SIZE);
	if (!task)
		task = current;
	debug_show_held_locks(task);
}
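
/*
 * show_stack() dumps kstack_depth_to_print raw stack words and then the
 * call trace.  With both arguments NULL, e.g. show_stack(NULL, NULL), it
 * dumps the stack of the current context.
 */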
void show_stack(struct task_struct *task, unsigned long *sp)
{
	register unsigned long *__r15 asm ("15");
	unsigned long *stack;
	int i;

	if (!sp)
		stack = task ? (unsigned long *) task->thread.ksp : __r15;
	else
		stack = sp;
	for (i = 0; i < kstack_depth_to_print; i++) {
		if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
			break;
		if ((i * sizeof(long) % 32) == 0)
			printk("%s ", i == 0 ? "" : "\n");
		printk(LONG, *stack++);
	}
	printk("\n");
	show_trace(task, sp);
}
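
/*
 * Only 64-bit machines provide the breaking-event-address register;
 * regs->args[0] is expected to hold the address recorded there (saved by
 * the low-level entry code).
 */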
static void show_last_breaking_event(struct pt_regs *regs)
{
#ifdef CONFIG_64BIT
	printk("Last Breaking-Event-Address:\n");
	printk(" [<%016lx>] %pSR\n", regs->args[0], (void *)regs->args[0]);
#endif
}
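
/*
 * Extract a field from the PSW mask: dividing by the lowest set bit of
 * "bits" ((~bits + 1) & bits) shifts the masked value down to bit 0, so
 * e.g. mask_bits(regs, PSW_MASK_KEY) returns the 4-bit storage key.
 */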
static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
{
	return (regs->psw.mask & bits) / ((~bits + 1) & bits);
}
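
/*
 * show_registers() prints the PSW, its decoded mask bits, all 16 general
 * purpose registers and, via show_code(), the code around the PSW address.
 */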
void show_registers(struct pt_regs *regs)
{
	char *mode;

	mode = user_mode(regs) ? "User" : "Krnl";
	printk("%s PSW : %p %p (%pSR)\n",
	       mode, (void *) regs->psw.mask,
	       (void *) regs->psw.addr,
	       (void *) regs->psw.addr);
	printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
	       "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER),
	       mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO),
	       mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY),
	       mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT),
	       mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC),
	       mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM));
#ifdef CONFIG_64BIT
	printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA));
#endif
	printk("\n%s GPRS: " FOURLONG, mode,
	       regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
	printk(" " FOURLONG,
	       regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
	printk(" " FOURLONG,
	       regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
	printk(" " FOURLONG,
	       regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);
	show_code(regs);
}
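
/*
 * show_regs() is the architecture hook the generic kernel uses to dump
 * register state; for kernel-mode pt_regs it also prints the call trace
 * starting at the saved r15 (the stack pointer).
 */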
void show_regs(struct pt_regs *regs)
{
	show_regs_print_info(KERN_DEFAULT);
	show_registers(regs);
	/* Show stack backtrace if pt_regs is from kernel mode */
	if (!user_mode(regs))
		show_trace(NULL, (unsigned long *) regs->gprs[15]);
	show_last_breaking_event(regs);
}

static DEFINE_SPINLOCK(die_lock);
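
/*
 * die() is the common oops path: it serializes oops output via die_lock,
 * prints the banner, modules and registers, taints the kernel, and either
 * panics (in interrupt context or when panic_on_oops is set) or kills the
 * current task with SIGSEGV.
 */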
void die(struct pt_regs *regs, const char *str)
{
	static int die_counter;

	oops_enter();
	lgr_info_log();
	debug_stop_all();
	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);
	printk("%s: %04x [#%d] ", str, regs->int_code & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("DEBUG_PAGEALLOC");
#endif
	printk("\n");
	notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV);
	print_modules();
	show_regs(regs);
	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	spin_unlock_irq(&die_lock);
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception: panic_on_oops");
	oops_exit();
	do_exit(SIGSEGV);
}