backtrace.c

/**
 * @file backtrace.c
 *
 * @remark Copyright 2004 Silicon Graphics Inc. All Rights Reserved.
 * @remark Read the file COPYING
 *
 * @author Greg Banks <gnb@melbourne.sgi.com>
 * @author Keith Owens <kaos@melbourne.sgi.com>
 * Based on work done for the ia64 port of the SGI kernprof patch, which is
 *    Copyright (c) 2003-2004 Silicon Graphics Inc. All Rights Reserved.
 */

#include <linux/oprofile.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/ptrace.h>
#include <asm/system.h>

/*
 * For IA64 we need to perform a complex little dance to get both
 * the struct pt_regs and a synthetic struct switch_stack in place
 * to allow the unwind code to work. This dance requires our unwind
 * using code to be called from a function called from unw_init_running().
 * There we only get a single void* data pointer, so use this struct
 * to hold all the data we need during the unwind.
 */
typedef struct
{
	unsigned int depth;
	struct pt_regs *regs;
	struct unw_frame_info frame;
	u64 *prev_pfs_loc;	/* state for WAR for old spinlock ool code */
} ia64_backtrace_t;
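
/*
 * Rough call flow, for orientation: ia64_backtrace() below fills in one of
 * these structs and passes it to unw_init_running(do_ia64_backtrace, &bt);
 * unw_init_running() sets up the pt_regs/switch_stack context on the stack
 * and then invokes do_ia64_backtrace() with the single void* pointer
 * mentioned above.
 */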

#if (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
/*
 * Returns non-zero if the PC is in the spinlock contention out-of-line code
 * with non-standard calling sequence (on older compilers).
 */
static __inline__ int in_old_ool_spinlock_code(unsigned long pc)
{
	extern const char ia64_spinlock_contention_pre3_4[] __attribute__ ((weak));
	extern const char ia64_spinlock_contention_pre3_4_end[] __attribute__ ((weak));
	unsigned long sc_start = (unsigned long)ia64_spinlock_contention_pre3_4;
	unsigned long sc_end = (unsigned long)ia64_spinlock_contention_pre3_4_end;
	return (sc_start && sc_end && pc >= sc_start && pc < sc_end);
}
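
/*
 * Note: the weak declarations above mean sc_start/sc_end evaluate to 0 on
 * kernels that do not contain the pre-3.4 out-of-line contention code, so
 * the sc_start && sc_end guard simply makes the range check a no-op there.
 */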
#else
/* Newer spinlock code does a proper br.call and works fine with the unwinder */
#define in_old_ool_spinlock_code(pc)	0
#endif

/* Returns non-zero if the PC is in the Interrupt Vector Table */
static __inline__ int in_ivt_code(unsigned long pc)
{
	extern char ia64_ivt[];
	return (pc >= (u_long)ia64_ivt && pc < (u_long)ia64_ivt+32768);
}
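
/*
 * ia64_ivt is the base of the 32KB Interrupt Vector Table (hence the 32768
 * byte window above); the IVT has no unwind information, which is why
 * next_frame() refuses to step through it.
 */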

/*
 * Unwind to next stack frame.
 */
static __inline__ int next_frame(ia64_backtrace_t *bt)
{
	/*
	 * Avoid unsightly console message from unw_unwind() when attempting
	 * to unwind through the Interrupt Vector Table which has no unwind
	 * information.
	 */
	if (in_ivt_code(bt->frame.ip))
		return 0;

	/*
	 * WAR for spinlock contention from leaf functions.
	 * ia64_spinlock_contention_pre3_4 has ar.pfs == r0. Leaf functions do
	 * not modify ar.pfs, so ar.pfs remains 0, stopping the backtrace.
	 * Record the previous ar.pfs when the current IP is in
	 * ia64_spinlock_contention_pre3_4, then unwind; if pfs_loc has not
	 * changed after the unwind, use pt_regs.ar_pfs, which is where the
	 * real ar.pfs is for leaf functions.
	 */
	if (bt->prev_pfs_loc && bt->regs && bt->frame.pfs_loc == bt->prev_pfs_loc)
		bt->frame.pfs_loc = &bt->regs->ar_pfs;
	bt->prev_pfs_loc = (in_old_ool_spinlock_code(bt->frame.ip) ? bt->frame.pfs_loc : NULL);

	return unw_unwind(&bt->frame) == 0;
}

static void do_ia64_backtrace(struct unw_frame_info *info, void *vdata)
{
	ia64_backtrace_t *bt = vdata;
	struct switch_stack *sw;
	int count = 0;
	u_long pc, sp;

	sw = (struct switch_stack *)(info+1);
	/* padding from unw_init_running */
	sw = (struct switch_stack *)(((unsigned long)sw + 15) & ~15);
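	/*
	 * Assumption behind the round-up above: unw_init_running() places its
	 * struct switch_stack immediately after the unw_frame_info it hands
	 * us, so rounding to 16 bytes just skips any alignment padding
	 * between the two.
	 */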

	unw_init_frame_info(&bt->frame, current, sw);

	/* skip over interrupt frame and oprofile calls */
	do {
		unw_get_sp(&bt->frame, &sp);
		if (sp >= (u_long)bt->regs)
			break;
		if (!next_frame(bt))
			return;
	} while (count++ < 200);
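
	/*
	 * The loop above stops once sp reaches the saved pt_regs: frames at
	 * lower stack addresses belong to the profiling interrupt and the
	 * oprofile/unwind machinery itself, and the cap of 200 iterations is
	 * just a safety net against a runaway unwind.
	 */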

	/* finally, grab the actual sample */
	while (bt->depth-- && next_frame(bt)) {
		unw_get_ip(&bt->frame, &pc);
		oprofile_add_trace(pc);
		if (unw_is_intr_frame(&bt->frame)) {
			/*
			 * Interrupt received on kernel stack; this can
			 * happen when a timer interrupt fires while processing
			 * a softirq from the tail end of a hardware interrupt
			 * which interrupted a system call. Don't laugh, it
			 * happens! Splice the backtrace into two parts to
			 * avoid spurious cycles in the gprof output.
			 */
			/* TODO: split rather than drop the 2nd half */
			break;
		}
	}
}

void
ia64_backtrace(struct pt_regs * const regs, unsigned int depth)
{
	ia64_backtrace_t bt;
	unsigned long flags;

	/*
	 * On IA64 there is little hope of getting backtraces from
	 * user space programs -- the problems of getting the unwind
	 * information from arbitrary user programs are extreme.
	 */
	if (user_mode(regs))
		return;

	bt.depth = depth;
	bt.regs = regs;
	bt.prev_pfs_loc = NULL;
	local_irq_save(flags);
	unw_init_running(do_ia64_backtrace, &bt);
	local_irq_restore(flags);
}