/*
 * ftrace_event.h - declarations for the ftrace event subsystem:
 * trace entries, output iterators/callbacks, event registration,
 * and event filtering.
 */
#ifndef _LINUX_FTRACE_EVENT_H
#define _LINUX_FTRACE_EVENT_H

#include <linux/trace_seq.h>
#include <linux/ring_buffer.h>
#include <linux/percpu.h>

/* Opaque types; only pointers to these are used in this header. */
struct trace_array;
struct tracer;
struct dentry;

/*
 * Per-CPU trace_seq, defined in the tracing core.
 * NOTE(review): presumably a scratch buffer for formatting event
 * output without allocation -- confirm against its definition site.
 */
DECLARE_PER_CPU(struct trace_seq, ftrace_event_seq);
/*
 * Maps one flag bit (or enumerated value) to a human-readable name.
 * Used as arrays by the two print helpers below.
 * NOTE(review): the array termination convention (likely a NULL name)
 * is enforced by the helper implementations -- confirm there.
 */
struct trace_print_flags {
	unsigned long		mask;
	const char		*name;
};

/* Print the names of the bits set in @flags, separated by @delim. */
const char *ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
				   unsigned long flags,
				   const struct trace_print_flags *flag_array);

/* Print the symbolic name matching the single value @val. */
const char *ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
				     const struct trace_print_flags *symbol_array);
/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *	bash-15816 [01]	235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
	unsigned short		type;		/* event type id; max is FTRACE_MAX_EVENT */
	unsigned char		flags;
	unsigned char		preempt_count;
	int			pid;
	int			tgid;
};
/*
 * Largest event type id representable in trace_entry::type:
 * (2^(8*sizeof(type)) - 1), i.e. 65535 for the 16-bit field above.
 */
#define FTRACE_MAX_EVENT						\
	((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1)
/*
 * Trace iterator - used by printout routines who present trace
 * results to users and which routines might sleep, etc:
 */
struct trace_iterator {
	struct trace_array	*tr;
	struct tracer		*trace;
	void			*private;	/* tracer-specific data */
	int			cpu_file;
	struct mutex		mutex;
	struct ring_buffer_iter	*buffer_iter[NR_CPUS];	/* one iterator per CPU buffer */
	unsigned long		iter_flags;

	/* The below is zeroed out in pipe_read */
	struct trace_seq	seq;		/* formatted output accumulator */
	struct trace_entry	*ent;		/* current entry */
	int			cpu;		/* CPU the current entry came from */
	u64			ts;		/* timestamp of the current entry */

	loff_t			pos;
	long			idx;

	cpumask_var_t		started;	/* CPUs already seen by this iteration */
};
  55. typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter,
  56. int flags);
  57. struct trace_event {
  58. struct hlist_node node;
  59. struct list_head list;
  60. int type;
  61. trace_print_func trace;
  62. trace_print_func raw;
  63. trace_print_func hex;
  64. trace_print_func binary;
  65. };
  66. extern int register_ftrace_event(struct trace_event *event);
  67. extern int unregister_ftrace_event(struct trace_event *event);
  68. /* Return values for print_line callback */
  69. enum print_line_t {
  70. TRACE_TYPE_PARTIAL_LINE = 0, /* Retry after flushing the seq */
  71. TRACE_TYPE_HANDLED = 1,
  72. TRACE_TYPE_UNHANDLED = 2, /* Relay to other output functions */
  73. TRACE_TYPE_NO_CONSUME = 3 /* Handled but ask to not consume */
  74. };
/* Fill in the common trace_entry fields (flags, preempt count, pid...). */
void tracing_generic_entry_update(struct trace_entry *entry,
				  unsigned long flags,
				  int pc);

/*
 * Reserve/commit helpers operating on the current global trace buffer.
 * The nowake variant commits without waking up pipe readers; discard
 * drops a reserved event instead of committing it.
 */
struct ring_buffer_event *
trace_current_buffer_lock_reserve(int type, unsigned long len,
				  unsigned long flags, int pc);
void trace_current_buffer_unlock_commit(struct ring_buffer_event *event,
					unsigned long flags, int pc);
void trace_nowake_buffer_unlock_commit(struct ring_buffer_event *event,
				       unsigned long flags, int pc);
void trace_current_buffer_discard_commit(struct ring_buffer_event *event);

/* Record @tsk's comm so pids can be resolved to names in the output. */
void tracing_record_cmdline(struct task_struct *tsk);

struct event_filter;
/*
 * One registered trace event (e.g. a tracepoint-backed event):
 * identity, debugfs entry, output handler, field layout, filter
 * state, and profiling hooks.
 */
struct ftrace_event_call {
	struct list_head	list;
	char			*name;		/* event name */
	char			*system;	/* subsystem the event belongs to */
	struct dentry		*dir;		/* debugfs directory for this event */
	struct trace_event	*event;		/* output callbacks */
	int			enabled;
	int			(*regfunc)(void *);	/* enable: hook up the probe */
	void			(*unregfunc)(void *);	/* disable: tear down the probe */
	int			id;		/* event type id */
	int			(*raw_init)(void);
	int			(*show_format)(struct ftrace_event_call *call,
					       struct trace_seq *s);
	int			(*define_fields)(struct ftrace_event_call *);
	struct list_head	fields;		/* list built by define_fields() */
	int			filter_active;
	struct event_filter	*filter;
	void			*mod;		/* owning module, if any */
	void			*data;

	/* event profiling (perf) support */
	atomic_t		profile_count;
	int			(*profile_enable)(struct ftrace_event_call *);
	void			(*profile_disable)(struct ftrace_event_call *);
};
/* Limits for event filters: predicates per filter, string value length. */
#define MAX_FILTER_PRED		32
#define MAX_FILTER_STR_VAL	128

extern int init_preds(struct ftrace_event_call *call);
extern void destroy_preds(struct ftrace_event_call *call);
/* Returns nonzero when record @rec passes @call's filter. */
extern int filter_match_preds(struct ftrace_event_call *call, void *rec);
/* Discard @event from the current buffer when @rec fails the filter. */
extern int filter_current_check_discard(struct ftrace_event_call *call,
					void *rec,
					struct ring_buffer_event *event);

/* Field classes for filtering (the @filter_type argument below). */
enum {
	FILTER_OTHER = 0,
	FILTER_STATIC_STRING,
	FILTER_DYN_STRING,
};

/* Describe one field of an event's record for format output/filtering. */
extern int trace_define_field(struct ftrace_event_call *call,
			      const char *type, const char *name,
			      int offset, int size, int is_signed,
			      int filter_type);
/* Define the fields shared by every event (the trace_entry header). */
extern int trace_define_common_fields(struct ftrace_event_call *call);
  129. #define is_signed_type(type) (((type)(-1)) < 0)
/* Set (or clear, @set == 0) the enabled state of a named event. */
int trace_set_clr_event(const char *system, const char *event, int set);

/*
 * printk-style tracing from event code: routes to the binary-format
 * fast path (__trace_bprintk) when fmt is a build-time constant,
 * falling back to plain __trace_printk otherwise.
 *
 * The double __builtin_constant_p is because gcc will give us an error
 * if we try to allocate the static variable to fmt if it is not a
 * constant. Even with the outer if statement optimizing out.
 */
#define event_trace_printk(ip, fmt, args...)				\
do {									\
	__trace_printk_check_format(fmt, ##args);			\
	tracing_record_cmdline(current);				\
	if (__builtin_constant_p(fmt)) {				\
		static const char *trace_printk_fmt			\
		  __attribute__((section("__trace_printk_fmt"))) =	\
			__builtin_constant_p(fmt) ? fmt : NULL;		\
									\
		__trace_bprintk(ip, trace_printk_fmt, ##args);		\
	} else								\
		__trace_printk(ip, fmt, ##args);			\
} while (0)

#endif /* _LINUX_FTRACE_EVENT_H */