trace_event_perf.c

/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"
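
/*
 * One per-cpu scratch buffer per perf recursion context;
 * perf_trace_buf_prepare() below indexes this array with the value
 * returned by perf_swevent_get_recursion_context().
 */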
static char *perf_trace_buf[4];

/*
 * Force it to be aligned to unsigned long to avoid misaligned access
 * surprises
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
	perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int total_ref_count;
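
/*
 * Attach a perf event to a trace event: take a reference on the trace
 * event, allocate its per-cpu hlist of perf events on first use, and
 * register the perf probe (via the class ->reg() callback when one is
 * provided, otherwise directly with tracepoint_probe_register()).
 */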
static int perf_trace_event_init(struct ftrace_event_call *tp_event,
				 struct perf_event *p_event)
{
	struct hlist_head *list;
	int ret = -ENOMEM;
	int cpu;

	p_event->tp_event = tp_event;
	if (tp_event->perf_refcount++ > 0)
		return 0;

	list = alloc_percpu(struct hlist_head);
	if (!list)
		goto fail;

	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));

	tp_event->perf_events = list;

	if (!total_ref_count) {
		char *buf;
		int i;

		/* First event system-wide: allocate the scratch buffers */
		for (i = 0; i < 4; i++) {
			buf = (char *)alloc_percpu(perf_trace_t);
			if (!buf)
				goto fail;

			perf_trace_buf[i] = buf;
		}
	}

	if (tp_event->class->reg)
		ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER);
	else
		ret = tracepoint_probe_register(tp_event->name,
						tp_event->class->perf_probe,
						tp_event);

	if (ret)
		goto fail;

	total_ref_count++;
	return 0;

fail:
	if (!total_ref_count) {
		int i;

		for (i = 0; i < 4; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}

	if (!--tp_event->perf_refcount) {
		free_percpu(tp_event->perf_events);
		tp_event->perf_events = NULL;
	}

	return ret;
}
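
/*
 * Entry point from the perf core: resolve p_event->attr.config to a
 * registered ftrace event under event_mutex, pin the owning module,
 * and initialize the association.
 */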
int perf_trace_init(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event;
	int event_id = p_event->attr.config;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(tp_event, &ftrace_events, list) {
		if (tp_event->event.type == event_id &&
		    tp_event->class &&
		    (tp_event->class->perf_probe ||
		     tp_event->class->reg) &&
		    try_module_get(tp_event->mod)) {
			ret = perf_trace_event_init(tp_event, p_event);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}
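
/*
 * Add the event to this CPU's hlist so the probe starts seeing it; the
 * list must already have been allocated by perf_trace_event_init().
 */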
int perf_trace_enable(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	struct hlist_head *list;

	list = tp_event->perf_events;
	if (WARN_ON_ONCE(!list))
		return -EINVAL;

	list = this_cpu_ptr(list);
	hlist_add_head_rcu(&p_event->hlist_entry, list);

	return 0;
}
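
/*
 * Remove the event from its per-cpu hlist; RCU readers may still see
 * it until a grace period elapses.
 */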
void perf_trace_disable(struct perf_event *p_event)
{
	hlist_del_rcu(&p_event->hlist_entry);
}
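
/*
 * Drop the reference taken in perf_trace_event_init(); the last user
 * unregisters the probe, waits for in-flight probes to finish, and
 * frees the per-cpu state (and, for the last event overall, the
 * scratch buffers).
 */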
void perf_trace_destroy(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	int i;

	mutex_lock(&event_mutex);
	if (--tp_event->perf_refcount > 0)
		goto out;

	if (tp_event->class->reg)
		tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER);
	else
		tracepoint_probe_unregister(tp_event->name,
					    tp_event->class->perf_probe,
					    tp_event);

	/*
	 * Ensure our callback won't be called anymore. See
	 * tracepoint_probe_unregister() and __DO_TRACE().
	 */
	synchronize_sched();

	free_percpu(tp_event->perf_events);
	tp_event->perf_events = NULL;

	if (!--total_ref_count) {
		for (i = 0; i < 4; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}
out:
	mutex_unlock(&event_mutex);
}
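
/*
 * Reserve a per-cpu scratch buffer for one trace record: enter a perf
 * recursion context, pick the buffer for that context, clear the u64
 * padding at the tail so no stack bytes leak to userspace, and fill in
 * the common trace_entry header.  Returns NULL (with no recursion
 * context to put back) if the recursion check fails.
 */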
__kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
				       struct pt_regs *regs, int *rctxp)
{
	struct trace_entry *entry;
	unsigned long flags;
	char *raw_data;
	int pc;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

	pc = preempt_count();

	*rctxp = perf_swevent_get_recursion_context();
	if (*rctxp < 0)
		return NULL;

	raw_data = this_cpu_ptr(perf_trace_buf[*rctxp]);

	/* zero the dead bytes from align to not leak stack to user */
	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));

	entry = (struct trace_entry *)raw_data;
	local_save_flags(flags);
	tracing_generic_entry_update(entry, flags, pc);
	entry->type = type;

	return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
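
/*
 * Typical caller (a sketch, not part of this file): the perf probes
 * generated from TRACE_EVENT() pair perf_trace_buf_prepare() with the
 * perf_trace_buf_submit() helper from trace.h, roughly:
 *
 *	entry = perf_trace_buf_prepare(size, event_call->event.type,
 *				       regs, &rctx);
 *	if (!entry)
 *		return;
 *	... fill in the event-specific fields of *entry ...
 *	perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head);
 *
 * where "size" is the record size rounded up to a multiple of u64 and
 * "head" is this CPU's hlist from tp_event->perf_events.
 */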