trace_event_perf.c

/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"
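
/*
 * One raw sample buffer per recursion context (task, softirq, hardirq,
 * NMI), so nested tracepoint hits cannot clobber each other's data.
 */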
static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];

/*
 * Force it to be aligned to unsigned long to avoid misaligned access
 * surprises
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
        perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int total_ref_count;
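
/*
 * Check whether the caller may attach this perf event to the tracepoint.
 * Pure counters carry no payload and are always allowed; raw samples
 * expose tracepoint data, so they are restricted to privileged users
 * (or to per-task events on TRACE_EVENT_FL_CAP_ANY tracepoints).
 */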
static int perf_trace_event_perm(struct ftrace_event_call *tp_event,
                                 struct perf_event *p_event)
{
        /* No tracing, just counting, so no obvious leak */
        if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))
                return 0;

        /* Some events are ok to be traced by non-root users... */
        if (p_event->attach_state == PERF_ATTACH_TASK) {
                if (tp_event->flags & TRACE_EVENT_FL_CAP_ANY)
                        return 0;
        }

        /*
         * ...otherwise raw tracepoint data can be a severe data leak,
         * only allow root to have these.
         */
        if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
                return -EPERM;

        return 0;
}
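
/*
 * The first event on a tracepoint allocates the per-cpu hlist of
 * attached events and, if it is the first trace event system-wide, the
 * raw sample buffers; later events just take a reference via
 * perf_refcount.
 */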
static int perf_trace_event_reg(struct ftrace_event_call *tp_event,
                                struct perf_event *p_event)
{
        struct hlist_head __percpu *list;
        int ret = -ENOMEM;
        int cpu;

        p_event->tp_event = tp_event;
        if (tp_event->perf_refcount++ > 0)
                return 0;

        list = alloc_percpu(struct hlist_head);
        if (!list)
                goto fail;

        for_each_possible_cpu(cpu)
                INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));

        tp_event->perf_events = list;

        if (!total_ref_count) {
                char __percpu *buf;
                int i;

                for (i = 0; i < PERF_NR_CONTEXTS; i++) {
                        buf = (char __percpu *)alloc_percpu(perf_trace_t);
                        if (!buf)
                                goto fail;

                        perf_trace_buf[i] = buf;
                }
        }

        ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER, NULL);
        if (ret)
                goto fail;

        total_ref_count++;
        return 0;

fail:
        if (!total_ref_count) {
                int i;

                for (i = 0; i < PERF_NR_CONTEXTS; i++) {
                        free_percpu(perf_trace_buf[i]);
                        perf_trace_buf[i] = NULL;
                }
        }

        if (!--tp_event->perf_refcount) {
                free_percpu(tp_event->perf_events);
                tp_event->perf_events = NULL;
        }

        return ret;
}
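
/*
 * Drop one reference; the last one unregisters the perf callback, waits
 * until no callback can still be running, then frees the per-cpu lists
 * and, for the last trace event system-wide, the raw sample buffers.
 */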
static void perf_trace_event_unreg(struct perf_event *p_event)
{
        struct ftrace_event_call *tp_event = p_event->tp_event;
        int i;

        if (--tp_event->perf_refcount > 0)
                goto out;

        tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER, NULL);

        /*
         * Ensure our callback won't be called anymore. The buffers
         * will be freed after that.
         */
        tracepoint_synchronize_unregister();

        free_percpu(tp_event->perf_events);
        tp_event->perf_events = NULL;

        if (!--total_ref_count) {
                for (i = 0; i < PERF_NR_CONTEXTS; i++) {
                        free_percpu(perf_trace_buf[i]);
                        perf_trace_buf[i] = NULL;
                }
        }
out:
        module_put(tp_event->mod);
}
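
/*
 * Per-event open/close hooks, forwarded to the event class so that
 * class implementations can do per-event setup and teardown.
 */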
static int perf_trace_event_open(struct perf_event *p_event)
{
        struct ftrace_event_call *tp_event = p_event->tp_event;
        return tp_event->class->reg(tp_event, TRACE_REG_PERF_OPEN, p_event);
}

static void perf_trace_event_close(struct perf_event *p_event)
{
        struct ftrace_event_call *tp_event = p_event->tp_event;
        tp_event->class->reg(tp_event, TRACE_REG_PERF_CLOSE, p_event);
}
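
/*
 * Full initialization of one perf event: permission check, refcounted
 * registration, then the per-event open hook; an open failure rolls the
 * registration back.
 */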
static int perf_trace_event_init(struct ftrace_event_call *tp_event,
                                 struct perf_event *p_event)
{
        int ret;

        ret = perf_trace_event_perm(tp_event, p_event);
        if (ret)
                return ret;

        ret = perf_trace_event_reg(tp_event, p_event);
        if (ret)
                return ret;

        ret = perf_trace_event_open(p_event);
        if (ret) {
                perf_trace_event_unreg(p_event);
                return ret;
        }

        return 0;
}
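
/*
 * Entry point from the perf core: find the trace event whose id matches
 * attr.config (the id userspace reads from the event's debugfs "id"
 * file), pin its module and initialize the event under event_mutex.
 */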
int perf_trace_init(struct perf_event *p_event)
{
        struct ftrace_event_call *tp_event;
        int event_id = p_event->attr.config;
        int ret = -EINVAL;

        mutex_lock(&event_mutex);
        list_for_each_entry(tp_event, &ftrace_events, list) {
                if (tp_event->event.type == event_id &&
                    tp_event->class && tp_event->class->reg &&
                    try_module_get(tp_event->mod)) {
                        ret = perf_trace_event_init(tp_event, p_event);
                        if (ret)
                                module_put(tp_event->mod);
                        break;
                }
        }
        mutex_unlock(&event_mutex);

        return ret;
}
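
/* Tear down everything perf_trace_init() set up, under the same mutex. */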
void perf_trace_destroy(struct perf_event *p_event)
{
        mutex_lock(&event_mutex);
        perf_trace_event_close(p_event);
        perf_trace_event_unreg(p_event);
        mutex_unlock(&event_mutex);
}
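
/*
 * Scheduling hooks from the perf core: add/del link the event into and
 * out of the per-cpu hlist that the tracepoint callback walks on each
 * hit, so only events currently scheduled in on this cpu see samples.
 */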
int perf_trace_add(struct perf_event *p_event, int flags)
{
        struct ftrace_event_call *tp_event = p_event->tp_event;
        struct hlist_head __percpu *pcpu_list;
        struct hlist_head *list;

        pcpu_list = tp_event->perf_events;
        if (WARN_ON_ONCE(!pcpu_list))
                return -EINVAL;

        if (!(flags & PERF_EF_START))
                p_event->hw.state = PERF_HES_STOPPED;

        list = this_cpu_ptr(pcpu_list);
        hlist_add_head_rcu(&p_event->hlist_entry, list);

        return 0;
}

void perf_trace_del(struct perf_event *p_event, int flags)
{
        hlist_del_rcu(&p_event->hlist_entry);
}
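
/*
 * Hand out a per-cpu, per-recursion-context raw buffer with the common
 * trace_entry header already filled in. A caller typically does,
 * roughly (sketch only; the submit helper lives outside this file):
 *
 *      entry = perf_trace_buf_prepare(size, event_type, regs, &rctx);
 *      if (!entry)
 *              return;
 *      ... fill in event-specific fields ...
 *      perf_trace_buf_submit(entry, size, rctx, addr, count, regs, head);
 */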
__kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
                                       struct pt_regs *regs, int *rctxp)
{
        struct trace_entry *entry;
        unsigned long flags;
        char *raw_data;
        int pc;

        BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

        pc = preempt_count();

        *rctxp = perf_swevent_get_recursion_context();
        if (*rctxp < 0)
                return NULL;

        raw_data = this_cpu_ptr(perf_trace_buf[*rctxp]);

        /* zero the dead bytes from align to not leak stack to user */
        memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));

        entry = (struct trace_entry *)raw_data;
        local_save_flags(flags);
        tracing_generic_entry_update(entry, flags, pc);
        entry->type = type;

        return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);