/*
 * Stack trace management functions
 *
 *  Copyright (C) 2006-2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 */
#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <asm/stacktrace.h>
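
/*
 * Callbacks handed to the generic dump_trace() stack walker.  Only the
 * ->address() hooks collect entries; the warning and per-stack hooks are
 * intentionally empty because we only want the return addresses.
 */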
static void save_stack_warning(void *data, char *msg)
{
}

static void
save_stack_warning_symbol(void *data, char *msg, unsigned long symbol)
{
}

static int save_stack_stack(void *data, char *name)
{
	return 0;
}

static void save_stack_address(void *data, unsigned long addr, int reliable)
{
	struct stack_trace *trace = data;

	if (!reliable)
		return;
	if (trace->skip > 0) {
		trace->skip--;
		return;
	}
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = addr;
}

static void
save_stack_address_nosched(void *data, unsigned long addr, int reliable)
{
	struct stack_trace *trace = (struct stack_trace *)data;

	if (!reliable)
		return;
	if (in_sched_functions(addr))
		return;
	if (trace->skip > 0) {
		trace->skip--;
		return;
	}
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = addr;
}
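
/*
 * The two ops tables below differ only in the ->address() callback:
 * the "nosched" variant additionally drops scheduler-internal frames,
 * which is what callers tracing another task usually want.
 */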
static const struct stacktrace_ops save_stack_ops = {
	.warning	= save_stack_warning,
	.warning_symbol	= save_stack_warning_symbol,
	.stack		= save_stack_stack,
	.address	= save_stack_address,
	.walk_stack	= print_context_stack,
};

static const struct stacktrace_ops save_stack_ops_nosched = {
	.warning	= save_stack_warning,
	.warning_symbol	= save_stack_warning_symbol,
	.stack		= save_stack_stack,
	.address	= save_stack_address_nosched,
	.walk_stack	= print_context_stack,
};

/*
 * Save stack-backtrace addresses into a stack_trace buffer.
 */
void save_stack_trace(struct stack_trace *trace)
{
	dump_trace(current, NULL, NULL, 0, &save_stack_ops, trace);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace);
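
/*
 * Typical usage from a caller (illustrative sketch, not part of this
 * file): the caller supplies the entries buffer and limits, and may use
 * ->skip to drop its own innermost frames from the result.  The trace is
 * terminated with ULONG_MAX when the buffer was not completely filled.
 *
 *	unsigned long buf[16];
 *	struct stack_trace trace = {
 *		.nr_entries	= 0,
 *		.max_entries	= ARRAY_SIZE(buf),
 *		.entries	= buf,
 *		.skip		= 1,
 *	};
 *
 *	save_stack_trace(&trace);
 */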

void save_stack_trace_bp(struct stack_trace *trace, unsigned long bp)
{
	dump_trace(current, NULL, NULL, bp, &save_stack_ops, trace);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}

void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	dump_trace(tsk, NULL, NULL, 0, &save_stack_ops_nosched, trace);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);

/* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */

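/*
 * Layout of one saved user frame when the program is built with frame
 * pointers: the word at *fp is the caller's frame pointer, immediately
 * followed by the return address.
 */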
struct stack_frame_user {
	const void __user	*next_fp;
	unsigned long		ret_addr;
};
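
/*
 * Copy one frame from the user stack without risking a page fault:
 * faults are disabled around the copy, so an unmapped frame simply
 * makes the copy fail and ends the walk.
 */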
static int
copy_stack_frame(const void __user *fp, struct stack_frame_user *frame)
{
	int ret;

	if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
		return 0;

	ret = 1;
	pagefault_disable();
	if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
		ret = 0;
	pagefault_enable();

	return ret;
}

static inline void __save_stack_trace_user(struct stack_trace *trace)
{
	const struct pt_regs *regs = task_pt_regs(current);
	const void __user *fp = (const void __user *)regs->bp;

	/* The saved user instruction pointer is always the first entry. */
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = regs->ip;

	/* Walk the frame-pointer chain up the user stack. */
	while (trace->nr_entries < trace->max_entries) {
		struct stack_frame_user frame;

		frame.next_fp = NULL;
		frame.ret_addr = 0;
		if (!copy_stack_frame(fp, &frame))
			break;
		/* A valid frame never sits below the current stack pointer. */
		if ((unsigned long)fp < regs->sp)
			break;
		if (frame.ret_addr) {
			trace->entries[trace->nr_entries++] =
				frame.ret_addr;
		}
		/* Bail out on a self-referencing frame to avoid looping. */
		if (fp == frame.next_fp)
			break;
		fp = frame.next_fp;
	}
}

void save_stack_trace_user(struct stack_trace *trace)
{
	/*
	 * Trace user stack if we are not a kernel thread
	 */
	if (current->mm) {
		__save_stack_trace_user(trace);
	}
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}