trace_event_profile.c

/*
 * trace event based perf counter profiling
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 *
 */

#include <linux/module.h>
#include "trace.h"

/*
 * alloc_percpu() takes a type rather than a size, so create a dummy
 * type whose size matches the buffer size we want.
 */
typedef struct {char buf[FTRACE_MAX_PROFILE_SIZE];} profile_buf_t;

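/*
 * Per-CPU scratch buffers in which the event profile probes assemble raw
 * sample data. The separate NMI buffer appears to exist so that an event
 * firing from NMI context does not clobber a buffer already in use from
 * process or irq context.
 */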
char *trace_profile_buf;
EXPORT_SYMBOL_GPL(trace_profile_buf);

char *trace_profile_buf_nmi;
EXPORT_SYMBOL_GPL(trace_profile_buf_nmi);

/* Count the events in use (per event id, not per instance) */
static int total_profile_count;

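/*
 * Enable profiling for a single event: on the first profiled event of any
 * kind, allocate the shared per-CPU buffers, then call the event's
 * profile_enable() callback. A failure unwinds any buffers allocated here.
 */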
static int ftrace_profile_enable_event(struct ftrace_event_call *event)
{
        char *buf;
        int ret = -ENOMEM;

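        /*
         * A non-zero result means profiling is already enabled for this
         * event; just keep the extra reference. This relies on
         * profile_count being initialized to -1 in the event definition
         * (an assumption; the atomic_add_negative() in the disable path
         * depends on the same convention).
         */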
        if (atomic_inc_return(&event->profile_count))
                return 0;

        if (!total_profile_count) {
                buf = (char *)alloc_percpu(profile_buf_t);
                if (!buf)
                        goto fail_buf;

                rcu_assign_pointer(trace_profile_buf, buf);

                buf = (char *)alloc_percpu(profile_buf_t);
                if (!buf)
                        goto fail_buf_nmi;

                rcu_assign_pointer(trace_profile_buf_nmi, buf);
        }

        ret = event->profile_enable();
        if (!ret) {
                total_profile_count++;
                return 0;
        }

fail_buf_nmi:
        if (!total_profile_count) {
                free_percpu(trace_profile_buf_nmi);
                free_percpu(trace_profile_buf);
                trace_profile_buf_nmi = NULL;
                trace_profile_buf = NULL;
        }
fail_buf:
        atomic_dec(&event->profile_count);

        return ret;
}

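/*
 * Look up the event by id under event_mutex, pin its owning module and
 * enable profiling for it. Returns -EINVAL if no matching event with a
 * profile_enable() callback is found or its module cannot be pinned.
 */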
int ftrace_profile_enable(int event_id)
{
        struct ftrace_event_call *event;
        int ret = -EINVAL;

        mutex_lock(&event_mutex);
        list_for_each_entry(event, &ftrace_events, list) {
                if (event->id == event_id && event->profile_enable &&
                    try_module_get(event->mod)) {
                        ret = ftrace_profile_enable_event(event);
                        break;
                }
        }
        mutex_unlock(&event_mutex);

        return ret;
}

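/*
 * Drop one profiling reference on the event. Only the last user actually
 * calls profile_disable(); once no profiled events remain at all, the
 * shared buffers are unpublished and freed after synchronize_sched() has
 * let probes still using the old pointers finish (the probes are expected
 * to run with preemption disabled, i.e. under sched-RCU).
 */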
static void ftrace_profile_disable_event(struct ftrace_event_call *event)
{
        char *buf, *nmi_buf;

        if (!atomic_add_negative(-1, &event->profile_count))
                return;

        event->profile_disable();

        if (!--total_profile_count) {
                buf = trace_profile_buf;
                rcu_assign_pointer(trace_profile_buf, NULL);

                nmi_buf = trace_profile_buf_nmi;
                rcu_assign_pointer(trace_profile_buf_nmi, NULL);

                /*
                 * Ensure every event currently profiling has finished
                 * before releasing the buffers.
                 */
                synchronize_sched();

                free_percpu(buf);
                free_percpu(nmi_buf);
        }
}

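/*
 * Counterpart of ftrace_profile_enable(): look up the event by id under
 * event_mutex, disable profiling for it and drop the module reference
 * taken at enable time.
 */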
void ftrace_profile_disable(int event_id)
{
        struct ftrace_event_call *event;

        mutex_lock(&event_mutex);
        list_for_each_entry(event, &ftrace_events, list) {
                if (event->id == event_id) {
                        ftrace_profile_disable_event(event);
                        module_put(event->mod);
                        break;
                }
        }
        mutex_unlock(&event_mutex);
}