trace_event_perf.c

/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"

DEFINE_PER_CPU(struct pt_regs, perf_trace_regs);
EXPORT_PER_CPU_SYMBOL_GPL(perf_trace_regs);

EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs);

static char *perf_trace_buf;
static char *perf_trace_buf_nmi;

/*
 * Force it to be aligned to unsigned long to avoid misaligned access
 * surprises
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
        perf_trace_t;
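
/*
 * Two scratch buffers are kept: probes firing in normal or interrupt
 * context write into perf_trace_buf, while probes firing in NMI context
 * use perf_trace_buf_nmi (see the in_nmi() check in
 * perf_trace_buf_prepare() below).  An NMI can arrive while a probe is
 * still filling the regular buffer on the same CPU, so NMIs get their
 * own per-cpu storage rather than clobbering an event mid-write.
 */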
/* Count the events in use (per event id, not per instance) */
static int total_ref_count;

static int perf_trace_event_enable(struct ftrace_event_call *event)
{
        char *buf;
        int ret = -ENOMEM;

        if (event->perf_refcount++ > 0)
                return 0;

        if (!total_ref_count) {
                buf = (char *)alloc_percpu(perf_trace_t);
                if (!buf)
                        goto fail_buf;

                rcu_assign_pointer(perf_trace_buf, buf);

                buf = (char *)alloc_percpu(perf_trace_t);
                if (!buf)
                        goto fail_buf_nmi;

                rcu_assign_pointer(perf_trace_buf_nmi, buf);
        }

        if (event->class->reg)
                ret = event->class->reg(event, TRACE_REG_PERF_REGISTER);
        else
                ret = tracepoint_probe_register(event->name,
                                                event->class->perf_probe,
                                                event);
        if (!ret) {
                total_ref_count++;
                return 0;
        }

fail_buf_nmi:
        if (!total_ref_count) {
                free_percpu(perf_trace_buf_nmi);
                free_percpu(perf_trace_buf);
                perf_trace_buf_nmi = NULL;
                perf_trace_buf = NULL;
        }
fail_buf:
        event->perf_refcount--;

        return ret;
}
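
/*
 * Entry point from the perf core: @event_id identifies the trace event
 * the user requested (typically taken from perf_event_attr::config).
 * The lookup runs under event_mutex so the event cannot be unregistered
 * while it is being enabled, and try_module_get() pins module-provided
 * events for as long as they are in use.
 */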
int perf_trace_enable(int event_id)
{
        struct ftrace_event_call *event;
        int ret = -EINVAL;

        mutex_lock(&event_mutex);
        list_for_each_entry(event, &ftrace_events, list) {
                if (event->id == event_id &&
                    event->class && event->class->perf_probe &&
                    try_module_get(event->mod)) {
                        ret = perf_trace_event_enable(event);
                        break;
                }
        }
        mutex_unlock(&event_mutex);

        return ret;
}
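
/*
 * Illustrative caller (a sketch under assumptions, not part of this
 * file): the perf core enables a tracepoint event roughly like
 *
 *      static int tp_event_init(struct perf_event *p_event)
 *      {
 *              int err = perf_trace_enable(p_event->attr.config);
 *
 *              if (err)
 *                      return err;
 *              p_event->destroy = tp_event_destroy;
 *              return 0;
 *      }
 *
 * where tp_event_destroy() would end with perf_trace_disable().  The
 * function names above are hypothetical; only perf_trace_enable() and
 * perf_trace_disable() are defined here.
 */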
static void perf_trace_event_disable(struct ftrace_event_call *event)
{
        char *buf, *nmi_buf;

        if (--event->perf_refcount > 0)
                return;

        if (event->class->reg)
                event->class->reg(event, TRACE_REG_PERF_UNREGISTER);
        else
                tracepoint_probe_unregister(event->name, event->class->perf_probe, event);

        if (!--total_ref_count) {
                buf = perf_trace_buf;
                rcu_assign_pointer(perf_trace_buf, NULL);

                nmi_buf = perf_trace_buf_nmi;
                rcu_assign_pointer(perf_trace_buf_nmi, NULL);

                /*
                 * Ensure all in-flight events have finished before
                 * releasing the buffers
                 */
                synchronize_sched();

                free_percpu(buf);
                free_percpu(nmi_buf);
        }
}
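
/*
 * The teardown above is safe against concurrent probes: readers fetch
 * the buffer pointers with rcu_dereference_sched() while interrupts
 * (and thus preemption) are disabled, so once the pointers are cleared,
 * synchronize_sched() guarantees that every probe which might still see
 * the old buffers has completed before free_percpu() runs.
 */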
void perf_trace_disable(int event_id)
{
        struct ftrace_event_call *event;

        mutex_lock(&event_mutex);
        list_for_each_entry(event, &ftrace_events, list) {
                if (event->id == event_id) {
                        perf_trace_event_disable(event);
                        module_put(event->mod);
                        break;
                }
        }
        mutex_unlock(&event_mutex);
}
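
/*
 * perf_trace_buf_prepare() hands out the current CPU's slot of the
 * appropriate scratch buffer, with a struct trace_entry header already
 * initialized.  On success it returns with interrupts disabled and a
 * swevent recursion context held; the caller must release both once the
 * event has been submitted (the error paths below release them here).
 */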
__kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
                                       int *rctxp, unsigned long *irq_flags)
{
        struct trace_entry *entry;
        char *trace_buf, *raw_data;
        int pc, cpu;

        BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

        pc = preempt_count();

        /* Protect the per-cpu buffer, begin the RCU read side */
        local_irq_save(*irq_flags);

        *rctxp = perf_swevent_get_recursion_context();
        if (*rctxp < 0)
                goto err_recursion;

        cpu = smp_processor_id();

        if (in_nmi())
                trace_buf = rcu_dereference_sched(perf_trace_buf_nmi);
        else
                trace_buf = rcu_dereference_sched(perf_trace_buf);

        if (!trace_buf)
                goto err;

        raw_data = per_cpu_ptr(trace_buf, cpu);

        /* zero the dead bytes from alignment, to not leak stack to user */
        memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));

        entry = (struct trace_entry *)raw_data;
        tracing_generic_entry_update(entry, *irq_flags, pc);
        entry->type = type;

        return raw_data;

err:
        perf_swevent_put_recursion_context(*rctxp);
err_recursion:
        local_irq_restore(*irq_flags);
        return NULL;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
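
/*
 * Illustrative use (a sketch under assumptions, not part of this file):
 * a generated TRACE_EVENT perf probe pairs perf_trace_buf_prepare()
 * with the submit side roughly like
 *
 *      unsigned long irq_flags;
 *      int rctx;
 *      int size = ALIGN(sizeof(*entry) + sizeof(u32), sizeof(u64)) -
 *                 sizeof(u32);
 *
 *      entry = perf_trace_buf_prepare(size, event_call->id,
 *                                     &rctx, &irq_flags);
 *      if (!entry)
 *              return;
 *      entry->some_field = value;      (fill in the event payload)
 *      perf_trace_buf_submit(entry, size, rctx, addr, count,
 *                            irq_flags, regs);
 *
 * perf_trace_buf_submit() is assumed here to be the trace.h helper that
 * reports the event to perf and then drops the recursion context and
 * restores the IRQ flags taken in perf_trace_buf_prepare(); its exact
 * signature may differ in this tree.
 */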