/*
 * arch/i386/kernel/stacktrace.c
 *
 * Stack trace management functions
 *
 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 */
#include <linux/sched.h>
#include <linux/stacktrace.h>
  10. static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
  11. {
  12. return p > (void *)tinfo &&
  13. p < (void *)tinfo + THREAD_SIZE - 3;
  14. }
/*
 * Save stack-backtrace addresses into a stack_trace buffer:
 *
 * Walks one stack area (the one containing @tinfo) and appends return
 * addresses to @trace, dropping the first @skip hits.  Returns the last
 * frame-pointer value reached so the caller can resume the walk in the
 * next (interrupted) context.
 */
static inline unsigned long
save_context_stack(struct stack_trace *trace, unsigned int skip,
		   struct thread_info *tinfo, unsigned long *stack,
		   unsigned long ebp)
{
	unsigned long addr;

#ifdef CONFIG_FRAME_POINTER
	/*
	 * Frame-pointer walk: each frame stores [saved ebp][return addr],
	 * so the return address is read from ebp + 4.
	 * NOTE(review): valid_stack_ptr() is only checked for ebp itself,
	 * not for ebp + 4 — the 4-byte headroom in valid_stack_ptr()
	 * presumably covers the saved-ebp word, but the return-address
	 * load reaches one word further; worth confirming against
	 * THREAD_SIZE layout.
	 */
	while (valid_stack_ptr(tinfo, (void *)ebp)) {
		addr = *(unsigned long *)(ebp + 4);
		if (!skip)
			trace->entries[trace->nr_entries++] = addr;
		else
			skip--;
		if (trace->nr_entries >= trace->max_entries)
			break;
		/*
		 * break out of recursive entries (such as
		 * end_of_stack_stop_unwind_function):
		 */
		if (ebp == *(unsigned long *)ebp)
			break;
		ebp = *(unsigned long *)ebp;
	}
#else
	/*
	 * No frame pointers: scan every word on the stack and record
	 * anything that looks like a kernel text address.  Noisier than
	 * the frame-pointer walk, but the best available without frame
	 * linkage.
	 */
	while (valid_stack_ptr(tinfo, stack)) {
		addr = *stack++;
		if (__kernel_text_address(addr)) {
			if (!skip)
				trace->entries[trace->nr_entries++] = addr;
			else
				skip--;
			if (trace->nr_entries >= trace->max_entries)
				break;
		}
	}
#endif

	return ebp;
}
/*
 * Save stack-backtrace addresses into a stack_trace buffer.
 * If all_contexts is set, all contexts (hardirq, softirq and process)
 * are saved. If not set then only the current context is saved.
 */
void save_stack_trace(struct stack_trace *trace,
		      struct task_struct *task, int all_contexts,
		      unsigned int skip)
{
	unsigned long ebp;
	/* The address of this local doubles as the initial stack cursor: */
	unsigned long *stack = &ebp;

	/* Caller must supply an empty buffer with room for entries: */
	WARN_ON(trace->nr_entries || !trace->max_entries);

	if (!task || task == current) {
		/* Grab ebp right from our regs: */
		asm ("movl %%ebp, %0" : "=r" (ebp));
	} else {
		/* ebp is the last reg pushed by switch_to(): */
		ebp = *(unsigned long *) task->thread.esp;
	}

	/*
	 * Walk each stack area in turn.  The thread_info at the base of
	 * the current area (found by masking the stack cursor down to a
	 * THREAD_SIZE boundary) links to the previous context's stack
	 * via ->previous_esp; a ULONG_MAX marker separates contexts in
	 * the output buffer.
	 */
	while (1) {
		struct thread_info *context = (struct thread_info *)
			((unsigned long)stack & (~(THREAD_SIZE - 1)));

		ebp = save_context_stack(trace, skip, context, stack, ebp);
		stack = (unsigned long *)context->previous_esp;
		if (!all_contexts || !stack ||
		    trace->nr_entries >= trace->max_entries)
			break;
		trace->entries[trace->nr_entries++] = ULONG_MAX;
		if (trace->nr_entries >= trace->max_entries)
			break;
	}
}
  87. }