trace_event_perf.c

/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"

DEFINE_PER_CPU(struct pt_regs, perf_trace_regs);
EXPORT_PER_CPU_SYMBOL_GPL(perf_trace_regs);

static char *perf_trace_buf;
static char *perf_trace_buf_nmi;

typedef typeof(char [PERF_MAX_TRACE_SIZE]) perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int total_ref_count;
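
/*
 * Enable @event for perf. The per-event refcount makes repeated enables
 * of the same event id cheap; the first user across *all* events also
 * allocates the two shared per-CPU buffers, one for regular contexts
 * and one for NMI context, so an NMI cannot corrupt the buffer of the
 * code it interrupted.
 */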
static int perf_trace_event_enable(struct ftrace_event_call *event)
{
        char *buf;
        int ret = -ENOMEM;

        if (event->perf_refcount++ > 0)
                return 0;

        if (!total_ref_count) {
                buf = (char *)alloc_percpu(perf_trace_t);
                if (!buf)
                        goto fail_buf;

                rcu_assign_pointer(perf_trace_buf, buf);

                buf = (char *)alloc_percpu(perf_trace_t);
                if (!buf)
                        goto fail_buf_nmi;

                rcu_assign_pointer(perf_trace_buf_nmi, buf);
        }

        ret = event->perf_event_enable(event);
        if (!ret) {
                total_ref_count++;
                return 0;
        }

fail_buf_nmi:
        if (!total_ref_count) {
                free_percpu(perf_trace_buf_nmi);
                free_percpu(perf_trace_buf);
                perf_trace_buf_nmi = NULL;
                perf_trace_buf = NULL;
        }
fail_buf:
        event->perf_refcount--;

        return ret;
}
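
/*
 * Look up a trace event by id and enable it for perf. Holding
 * event_mutex keeps the ftrace_events list stable, and try_module_get()
 * pins the module that owns the event so it cannot be unloaded while
 * profiling uses it.
 */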
int perf_trace_enable(int event_id)
{
        struct ftrace_event_call *event;
        int ret = -EINVAL;

        mutex_lock(&event_mutex);
        list_for_each_entry(event, &ftrace_events, list) {
                if (event->id == event_id && event->perf_event_enable &&
                    try_module_get(event->mod)) {
                        ret = perf_trace_event_enable(event);
                        break;
                }
        }
        mutex_unlock(&event_mutex);

        return ret;
}
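
/*
 * Drop one reference on @event. When the global count reaches zero,
 * the buffer pointers are published as NULL first, then
 * synchronize_sched() waits for all preemption-disabled readers (see
 * perf_trace_buf_prepare()) before the memory is actually freed.
 */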
static void perf_trace_event_disable(struct ftrace_event_call *event)
{
        char *buf, *nmi_buf;

        if (--event->perf_refcount > 0)
                return;

        event->perf_event_disable(event);

        if (!--total_ref_count) {
                buf = perf_trace_buf;
                rcu_assign_pointer(perf_trace_buf, NULL);

                nmi_buf = perf_trace_buf_nmi;
                rcu_assign_pointer(perf_trace_buf_nmi, NULL);

                /*
                 * Ensure all events in profiling have finished before
                 * releasing the buffers
                 */
                synchronize_sched();

                free_percpu(buf);
                free_percpu(nmi_buf);
        }
}
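
/*
 * Counterpart of perf_trace_enable(): find the event by id, disable it
 * and release the module reference taken at enable time.
 */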
void perf_trace_disable(int event_id)
{
        struct ftrace_event_call *event;

        mutex_lock(&event_mutex);
        list_for_each_entry(event, &ftrace_events, list) {
                if (event->id == event_id) {
                        perf_trace_event_disable(event);
                        module_put(event->mod);
                        break;
                }
        }
        mutex_unlock(&event_mutex);
}
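
/*
 * Hand out the current CPU's slice of the trace buffer (sized
 * PERF_MAX_TRACE_SIZE per CPU), picking the NMI buffer when called
 * from NMI context, with the generic trace_entry header already
 * initialized. On success this returns with IRQs disabled and a
 * swevent recursion context held; the caller must release both once
 * the event has been written (in this tree, via
 * perf_trace_buf_submit()). On failure it returns NULL with both
 * already released.
 */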
__kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
                                       int *rctxp, unsigned long *irq_flags)
{
        struct trace_entry *entry;
        char *trace_buf, *raw_data;
        int pc, cpu;

        pc = preempt_count();

        /* Protect the per-cpu buffer; begin the RCU read side */
        local_irq_save(*irq_flags);

        *rctxp = perf_swevent_get_recursion_context();
        if (*rctxp < 0)
                goto err_recursion;

        cpu = smp_processor_id();

        if (in_nmi())
                trace_buf = rcu_dereference(perf_trace_buf_nmi);
        else
                trace_buf = rcu_dereference(perf_trace_buf);

        if (!trace_buf)
                goto err;

        raw_data = per_cpu_ptr(trace_buf, cpu);

        /* Zero the dead alignment bytes so stack data doesn't leak to userspace */
        *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;

        entry = (struct trace_entry *)raw_data;
        tracing_generic_entry_update(entry, *irq_flags, pc);
        entry->type = type;

        return raw_data;
err:
        perf_swevent_put_recursion_context(*rctxp);
err_recursion:
        local_irq_restore(*irq_flags);
        return NULL;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
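
/*
 * Usage sketch (illustrative, not part of this file): the generated
 * tracepoint handlers in include/trace/ftrace.h drive this pair of
 * helpers roughly as below. "my_entry_type" stands in for the event's
 * generated record struct and "size" for its 8-byte-aligned length;
 * both are placeholders, not names from this file.
 *
 *      struct my_entry_type *entry;
 *      unsigned long irq_flags;
 *      int rctx;
 *
 *      entry = perf_trace_buf_prepare(size, event_call->id,
 *                                     &rctx, &irq_flags);
 *      if (!entry)
 *              return;
 *
 *      ... fill in the event-specific fields of *entry ...
 *
 *      perf_trace_buf_submit(entry, size, rctx, 0, 1,
 *                            irq_flags, regs);
 */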