trace_event_profile.c

/*
 * trace event based perf counter profiling
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 *
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"

static char *perf_trace_buf;
static char *perf_trace_buf_nmi;

typedef typeof(char [FTRACE_MAX_PROFILE_SIZE]) perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int total_profile_count;
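
/*
 * Enable profiling for one event. The per-event profile_count tracks how
 * many profiling instances reference the event; the two per-cpu trace
 * buffers (normal context and NMI context) are allocated lazily by the
 * first event enabled system-wide and published with rcu_assign_pointer()
 * so lockless readers only ever see fully initialised memory.
 */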
static int ftrace_profile_enable_event(struct ftrace_event_call *event)
{
	char *buf;
	int ret = -ENOMEM;

	if (event->profile_count++ > 0)
		return 0;

	if (!total_profile_count) {
		buf = (char *)alloc_percpu(perf_trace_t);
		if (!buf)
			goto fail_buf;

		rcu_assign_pointer(perf_trace_buf, buf);

		buf = (char *)alloc_percpu(perf_trace_t);
		if (!buf)
			goto fail_buf_nmi;

		rcu_assign_pointer(perf_trace_buf_nmi, buf);
	}

	ret = event->profile_enable(event);
	if (!ret) {
		total_profile_count++;
		return 0;
	}

fail_buf_nmi:
	if (!total_profile_count) {
		free_percpu(perf_trace_buf_nmi);
		free_percpu(perf_trace_buf);
		perf_trace_buf_nmi = NULL;
		perf_trace_buf = NULL;
	}
fail_buf:
	event->profile_count--;

	return ret;
}
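
/*
 * Look up an event by id under event_mutex and enable profiling on it.
 * A reference on the owning module is taken so it cannot be unloaded
 * while profiling is active; the reference is dropped again in
 * ftrace_profile_disable().
 */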
int ftrace_profile_enable(int event_id)
{
	struct ftrace_event_call *event;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(event, &ftrace_events, list) {
		if (event->id == event_id && event->profile_enable &&
		    try_module_get(event->mod)) {
			ret = ftrace_profile_enable_event(event);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}
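
/*
 * Drop one profiling reference from the event. When the last profiled
 * event system-wide goes away, unpublish the per-cpu buffers, wait for
 * in-flight users with synchronize_sched(), then free them.
 */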
static void ftrace_profile_disable_event(struct ftrace_event_call *event)
{
	char *buf, *nmi_buf;

	if (--event->profile_count > 0)
		return;

	event->profile_disable(event);

	if (!--total_profile_count) {
		buf = perf_trace_buf;
		rcu_assign_pointer(perf_trace_buf, NULL);

		nmi_buf = perf_trace_buf_nmi;
		rcu_assign_pointer(perf_trace_buf_nmi, NULL);

		/*
		 * Ensure every event still profiling has finished before
		 * releasing the buffers
		 */
		synchronize_sched();

		free_percpu(buf);
		free_percpu(nmi_buf);
	}
}
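
/*
 * Counterpart of ftrace_profile_enable(): find the event by id, disable
 * profiling and release the module reference taken at enable time.
 */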
void ftrace_profile_disable(int event_id)
{
	struct ftrace_event_call *event;

	mutex_lock(&event_mutex);
	list_for_each_entry(event, &ftrace_events, list) {
		if (event->id == event_id) {
			ftrace_profile_disable_event(event);
			module_put(event->mod);
			break;
		}
	}
	mutex_unlock(&event_mutex);
}
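
/*
 * Reserve space in the per-cpu profiling buffer for one event record.
 * Interrupts are disabled (which also acts as the sched-RCU read side
 * protecting the buffer against being freed under us), a software-event
 * recursion context is taken, and the NMI buffer is used when called
 * from NMI context. Returns a pointer to the raw record, or NULL on
 * failure; on success the recursion context and saved flags are handed
 * back through *rctxp and *irq_flags, and the caller is expected to
 * release them once the record has been submitted.
 */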
__kprobes void *ftrace_perf_buf_prepare(int size, unsigned short type,
					int *rctxp, unsigned long *irq_flags)
{
	struct trace_entry *entry;
	char *trace_buf, *raw_data;
	int pc, cpu;

	pc = preempt_count();

	/* Protect the per cpu buffer, begin the rcu read side */
	local_irq_save(*irq_flags);

	*rctxp = perf_swevent_get_recursion_context();
	if (*rctxp < 0)
		goto err_recursion;

	cpu = smp_processor_id();

	if (in_nmi())
		trace_buf = rcu_dereference_sched(perf_trace_buf_nmi);
	else
		trace_buf = rcu_dereference_sched(perf_trace_buf);

	if (!trace_buf)
		goto err;

	raw_data = per_cpu_ptr(trace_buf, cpu);

	/* zero the trailing alignment padding so we don't leak stack data */
	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;

	entry = (struct trace_entry *)raw_data;
	tracing_generic_entry_update(entry, *irq_flags, pc);
	entry->type = type;

	return raw_data;

err:
	perf_swevent_put_recursion_context(*rctxp);
err_recursion:
	local_irq_restore(*irq_flags);
	return NULL;
}
EXPORT_SYMBOL_GPL(ftrace_perf_buf_prepare);