trace_event_profile.c 2.4 KB

/*
 * trace event based perf counter profiling
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 *
 */

#include <linux/module.h>
#include "trace.h"
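
/*
 * Per-cpu scratch buffers used to assemble perf samples for trace events:
 * one for ordinary contexts and a separate one for NMI context, so an NMI
 * arriving mid-event cannot clobber a record that is being written.  Both
 * pointers are published with rcu_assign_pointer() and torn down under RCU.
 */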
char *perf_trace_buf;
EXPORT_SYMBOL_GPL(perf_trace_buf);

char *perf_trace_buf_nmi;
EXPORT_SYMBOL_GPL(perf_trace_buf_nmi);

typedef typeof(char [FTRACE_MAX_PROFILE_SIZE]) perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int total_profile_count;
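
/*
 * Enable profiling for one event.  Only the first reference on the event
 * proceeds past the atomic count; if no event at all is being profiled yet,
 * the shared per-cpu buffers are allocated and published before the event's
 * profile_enable() hook is called.  On failure everything is rolled back.
 */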
static int ftrace_profile_enable_event(struct ftrace_event_call *event)
{
        char *buf;
        int ret = -ENOMEM;

        if (atomic_inc_return(&event->profile_count))
                return 0;

        if (!total_profile_count) {
                buf = (char *)alloc_percpu(perf_trace_t);
                if (!buf)
                        goto fail_buf;

                rcu_assign_pointer(perf_trace_buf, buf);

                buf = (char *)alloc_percpu(perf_trace_t);
                if (!buf)
                        goto fail_buf_nmi;

                rcu_assign_pointer(perf_trace_buf_nmi, buf);
        }

        ret = event->profile_enable(event);
        if (!ret) {
                total_profile_count++;
                return 0;
        }

fail_buf_nmi:
        if (!total_profile_count) {
                free_percpu(perf_trace_buf_nmi);
                free_percpu(perf_trace_buf);
                perf_trace_buf_nmi = NULL;
                perf_trace_buf = NULL;
        }
fail_buf:
        atomic_dec(&event->profile_count);

        return ret;
}
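
/*
 * Look up an event by id under event_mutex and enable profiling on it,
 * taking a reference on the owning module so it cannot be unloaded while
 * the event is being profiled.
 */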
int ftrace_profile_enable(int event_id)
{
        struct ftrace_event_call *event;
        int ret = -EINVAL;

        mutex_lock(&event_mutex);
        list_for_each_entry(event, &ftrace_events, list) {
                if (event->id == event_id && event->profile_enable &&
                    try_module_get(event->mod)) {
                        ret = ftrace_profile_enable_event(event);
                        break;
                }
        }
        mutex_unlock(&event_mutex);

        return ret;
}
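
/*
 * Drop one profiling reference on the event; the last reference calls the
 * event's profile_disable() hook.  When no events are being profiled at all
 * any more, unpublish the shared buffers, wait for in-flight users with
 * synchronize_sched(), and only then free the per-cpu memory.
 */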
static void ftrace_profile_disable_event(struct ftrace_event_call *event)
{
        char *buf, *nmi_buf;

        if (!atomic_add_negative(-1, &event->profile_count))
                return;

        event->profile_disable(event);

        if (!--total_profile_count) {
                buf = perf_trace_buf;
                rcu_assign_pointer(perf_trace_buf, NULL);

                nmi_buf = perf_trace_buf_nmi;
                rcu_assign_pointer(perf_trace_buf_nmi, NULL);

                /*
                 * Ensure every event being profiled has finished before
                 * releasing the buffers.
                 */
                synchronize_sched();

                free_percpu(buf);
                free_percpu(nmi_buf);
        }
}
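
/*
 * Counterpart of ftrace_profile_enable(): find the event by id under
 * event_mutex, disable profiling on it and release the module reference
 * taken at enable time.
 */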
void ftrace_profile_disable(int event_id)
{
        struct ftrace_event_call *event;

        mutex_lock(&event_mutex);
        list_for_each_entry(event, &ftrace_events, list) {
                if (event->id == event_id) {
                        ftrace_profile_disable_event(event);
                        module_put(event->mod);
                        break;
                }
        }
        mutex_unlock(&event_mutex);
}