trace_event_profile.c

/*
 * trace event based perf counter profiling
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 *
 */

#include <linux/module.h>
#include "trace.h"

struct perf_trace_buf *perf_trace_buf;
EXPORT_SYMBOL_GPL(perf_trace_buf);

struct perf_trace_buf *perf_trace_buf_nmi;
EXPORT_SYMBOL_GPL(perf_trace_buf_nmi);
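
/*
 * Layout note (assumption, not visible in this file): "trace.h" in this
 * tree is expected to define the per-cpu scratch buffer roughly as:
 *
 *      #define FTRACE_MAX_PROFILE_SIZE 2048
 *
 *      struct perf_trace_buf {
 *              char    buf[FTRACE_MAX_PROFILE_SIZE];
 *              int     recursion;
 *      };
 *
 * A single pair of these per-cpu buffers (one for normal contexts, one
 * for NMI context) is shared by all profiled events, hence the global
 * refcounting below.
 */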

/* Count the events in use (per event id, not per instance) */
static int total_profile_count;

static int ftrace_profile_enable_event(struct ftrace_event_call *event)
{
        struct perf_trace_buf *buf;
        int ret = -ENOMEM;

        /* Only the first user of this particular event does the setup */
        if (atomic_inc_return(&event->profile_count))
                return 0;

        /* First profiled event overall: allocate the shared buffers */
        if (!total_profile_count) {
                buf = alloc_percpu(struct perf_trace_buf);
                if (!buf)
                        goto fail_buf;

                rcu_assign_pointer(perf_trace_buf, buf);

                buf = alloc_percpu(struct perf_trace_buf);
                if (!buf)
                        goto fail_buf_nmi;

                rcu_assign_pointer(perf_trace_buf_nmi, buf);
        }

        ret = event->profile_enable(event);
        if (!ret) {
                total_profile_count++;
                return 0;
        }

fail_buf_nmi:
        if (!total_profile_count) {
                free_percpu(perf_trace_buf_nmi);
                free_percpu(perf_trace_buf);
                perf_trace_buf_nmi = NULL;
                perf_trace_buf = NULL;
        }
fail_buf:
        atomic_dec(&event->profile_count);

        return ret;
}
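
/*
 * Reader-side sketch (assumption: this mirrors the event-side code
 * generated from include/trace/ftrace.h; the names are illustrative).
 * An event firing concurrently with enable/disable picks the buffer up
 * under rcu_read_lock() with interrupts off:
 *
 *      rcu_read_lock();
 *      trace_buf = rcu_dereference(perf_trace_buf);
 *      if (!trace_buf)
 *              goto end;
 *      raw_data = per_cpu_ptr(trace_buf, smp_processor_id())->buf;
 *      ...
 *      rcu_read_unlock();
 *
 * which is why the writers here publish with rcu_assign_pointer() and
 * never touch a live pointer in place.
 */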

int ftrace_profile_enable(int event_id)
{
        struct ftrace_event_call *event;
        int ret = -EINVAL;

        mutex_lock(&event_mutex);
        list_for_each_entry(event, &ftrace_events, list) {
                if (event->id == event_id && event->profile_enable &&
                    try_module_get(event->mod)) {
                        ret = ftrace_profile_enable_event(event);
                        break;
                }
        }
        mutex_unlock(&event_mutex);

        return ret;
}
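
/*
 * Caller sketch (assumption: this is roughly how the perf syscall glue
 * in kernel/perf_event.c of this era drives the API): for a
 * PERF_TYPE_TRACEPOINT event the id comes straight from userspace via
 * perf_event_attr:
 *
 *      if (ftrace_profile_enable(event->attr.config))
 *              return NULL;
 *      event->destroy = tp_perf_event_destroy;
 */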

static void ftrace_profile_disable_event(struct ftrace_event_call *event)
{
        struct perf_trace_buf *buf, *nmi_buf;

        /* Only the last user of this particular event tears down */
        if (!atomic_add_negative(-1, &event->profile_count))
                return;

        event->profile_disable(event);

        if (!--total_profile_count) {
                buf = perf_trace_buf;
                rcu_assign_pointer(perf_trace_buf, NULL);

                nmi_buf = perf_trace_buf_nmi;
                rcu_assign_pointer(perf_trace_buf_nmi, NULL);

                /*
                 * Ensure every event in profiling has finished before
                 * releasing the buffers
                 */
                synchronize_sched();

                free_percpu(buf);
                free_percpu(nmi_buf);
        }
}
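
/*
 * Note on the synchronize_sched() above: tracepoint callbacks run with
 * preemption disabled, i.e. inside a sched-RCU read-side critical
 * section, so once it returns no CPU can still be using the buffers
 * reachable through the old perf_trace_buf pointers, and freeing them
 * is safe.
 */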

void ftrace_profile_disable(int event_id)
{
        struct ftrace_event_call *event;

        mutex_lock(&event_mutex);
        list_for_each_entry(event, &ftrace_events, list) {
                if (event->id == event_id) {
                        ftrace_profile_disable_event(event);
                        module_put(event->mod);
                        break;
                }
        }
        mutex_unlock(&event_mutex);
}