perf_callchain.c

/*
 * Performance event callchain support - SuperH architecture code
 *
 * Copyright (C) 2009 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <asm/unwinder.h>
#include <asm/ptrace.h>
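
/* Append one instruction pointer to the callchain entry, if depth allows. */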
static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
	if (entry->nr < PERF_MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;
}
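
/*
 * stacktrace_ops callbacks: warnings and stack-switch notifications are
 * ignored; only addresses the unwinder reports as reliable are stored.
 */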
static void callchain_warning(void *data, char *msg)
{
}

static void
callchain_warning_symbol(void *data, char *msg, unsigned long symbol)
{
}

static int callchain_stack(void *data, char *name)
{
	return 0;
}

static void callchain_address(void *data, unsigned long addr, int reliable)
{
	struct perf_callchain_entry *entry = data;

	if (reliable)
		callchain_store(entry, addr);
}

static const struct stacktrace_ops callchain_ops = {
	.warning	= callchain_warning,
	.warning_symbol	= callchain_warning_symbol,
	.stack		= callchain_stack,
	.address	= callchain_address,
};
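
/*
 * Record the kernel context marker and the sampled PC, then let the
 * unwinder walk the rest of the kernel stack through callchain_ops.
 */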
static void
perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	callchain_store(entry, PERF_CONTEXT_KERNEL);
	callchain_store(entry, regs->pc);

	unwind_stack(NULL, regs, NULL, &callchain_ops, entry);
}
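
/*
 * Dispatch on the sampled context: bail out when there is no register
 * state, or for user-mode samples of tasks that are not running.
 */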
static void
perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	int is_user;

	if (!regs)
		return;

	is_user = user_mode(regs);

	if (is_user && current->state != TASK_RUNNING)
		return;

	/*
	 * Only the kernel side is implemented for now.
	 */
	if (!is_user)
		perf_callchain_kernel(regs, entry);
}

/*
 * No need for separate IRQ and NMI entries.
 */
static DEFINE_PER_CPU(struct perf_callchain_entry, callchain);
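
/*
 * Reset the per-CPU callchain buffer, fill it for the current sample,
 * and return the filled entry.
 */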
struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
	struct perf_callchain_entry *entry = &__get_cpu_var(callchain);

	entry->nr = 0;

	perf_do_callchain(regs, entry);

	return entry;
}