/*
 * ftrace_event.h - declarations for the ftrace event tracing infrastructure
 */
  1. #ifndef _LINUX_FTRACE_EVENT_H
  2. #define _LINUX_FTRACE_EVENT_H
  3. #include <linux/trace_seq.h>
  4. #include <linux/ring_buffer.h>
  5. #include <linux/percpu.h>
  6. struct trace_array;
  7. struct tracer;
  8. struct dentry;
  9. DECLARE_PER_CPU(struct trace_seq, ftrace_event_seq);
  10. struct trace_print_flags {
  11. unsigned long mask;
  12. const char *name;
  13. };
  14. const char *ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
  15. unsigned long flags,
  16. const struct trace_print_flags *flag_array);
  17. const char *ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
  18. const struct trace_print_flags *symbol_array);
  19. /*
  20. * The trace entry - the most basic unit of tracing. This is what
  21. * is printed in the end as a single line in the trace output, such as:
  22. *
  23. * bash-15816 [01] 235.197585: idle_cpu <- irq_enter
  24. */
  25. struct trace_entry {
  26. unsigned short type;
  27. unsigned char flags;
  28. unsigned char preempt_count;
  29. int pid;
  30. int tgid;
  31. };
  32. #define FTRACE_MAX_EVENT \
  33. ((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1)
  34. /*
  35. * Trace iterator - used by printout routines who present trace
  36. * results to users and which routines might sleep, etc:
  37. */
  38. struct trace_iterator {
  39. struct trace_array *tr;
  40. struct tracer *trace;
  41. void *private;
  42. int cpu_file;
  43. struct mutex mutex;
  44. struct ring_buffer_iter *buffer_iter[NR_CPUS];
  45. unsigned long iter_flags;
  46. /* The below is zeroed out in pipe_read */
  47. struct trace_seq seq;
  48. struct trace_entry *ent;
  49. int cpu;
  50. u64 ts;
  51. loff_t pos;
  52. long idx;
  53. cpumask_var_t started;
  54. };
  55. typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter,
  56. int flags);
  57. struct trace_event {
  58. struct hlist_node node;
  59. struct list_head list;
  60. int type;
  61. trace_print_func trace;
  62. trace_print_func raw;
  63. trace_print_func hex;
  64. trace_print_func binary;
  65. };
  66. extern int register_ftrace_event(struct trace_event *event);
  67. extern int unregister_ftrace_event(struct trace_event *event);
  68. /* Return values for print_line callback */
  69. enum print_line_t {
  70. TRACE_TYPE_PARTIAL_LINE = 0, /* Retry after flushing the seq */
  71. TRACE_TYPE_HANDLED = 1,
  72. TRACE_TYPE_UNHANDLED = 2, /* Relay to other output functions */
  73. TRACE_TYPE_NO_CONSUME = 3 /* Handled but ask to not consume */
  74. };
  75. struct ring_buffer_event *
  76. trace_current_buffer_lock_reserve(int type, unsigned long len,
  77. unsigned long flags, int pc);
  78. void trace_current_buffer_unlock_commit(struct ring_buffer_event *event,
  79. unsigned long flags, int pc);
  80. void trace_nowake_buffer_unlock_commit(struct ring_buffer_event *event,
  81. unsigned long flags, int pc);
  82. void trace_current_buffer_discard_commit(struct ring_buffer_event *event);
  83. void tracing_record_cmdline(struct task_struct *tsk);
  84. struct ftrace_event_call {
  85. struct list_head list;
  86. char *name;
  87. char *system;
  88. struct dentry *dir;
  89. struct trace_event *event;
  90. int enabled;
  91. int (*regfunc)(void);
  92. void (*unregfunc)(void);
  93. int id;
  94. int (*raw_init)(void);
  95. int (*show_format)(struct trace_seq *s);
  96. int (*define_fields)(void);
  97. struct list_head fields;
  98. int filter_active;
  99. void *filter;
  100. void *mod;
  101. atomic_t profile_count;
  102. int (*profile_enable)(struct ftrace_event_call *);
  103. void (*profile_disable)(struct ftrace_event_call *);
  104. };
  105. #define MAX_FILTER_PRED 32
  106. #define MAX_FILTER_STR_VAL 128
  107. extern int init_preds(struct ftrace_event_call *call);
  108. extern void destroy_preds(struct ftrace_event_call *call);
  109. extern int filter_match_preds(struct ftrace_event_call *call, void *rec);
  110. extern int filter_current_check_discard(struct ftrace_event_call *call,
  111. void *rec,
  112. struct ring_buffer_event *event);
  113. extern int trace_define_field(struct ftrace_event_call *call, char *type,
  114. char *name, int offset, int size, int is_signed);
  115. #define is_signed_type(type) (((type)(-1)) < 0)
  116. int trace_set_clr_event(const char *system, const char *event, int set);
  117. /*
  118. * The double __builtin_constant_p is because gcc will give us an error
  119. * if we try to allocate the static variable to fmt if it is not a
  120. * constant. Even with the outer if statement optimizing out.
  121. */
  122. #define event_trace_printk(ip, fmt, args...) \
  123. do { \
  124. __trace_printk_check_format(fmt, ##args); \
  125. tracing_record_cmdline(current); \
  126. if (__builtin_constant_p(fmt)) { \
  127. static const char *trace_printk_fmt \
  128. __attribute__((section("__trace_printk_fmt"))) = \
  129. __builtin_constant_p(fmt) ? fmt : NULL; \
  130. \
  131. __trace_bprintk(ip, trace_printk_fmt, ##args); \
  132. } else \
  133. __trace_printk(ip, fmt, ##args); \
  134. } while (0)
  135. #define __common_field(type, item, is_signed) \
  136. ret = trace_define_field(event_call, #type, "common_" #item, \
  137. offsetof(typeof(field.ent), item), \
  138. sizeof(field.ent.item), is_signed); \
  139. if (ret) \
  140. return ret;
  141. #endif /* _LINUX_FTRACE_EVENT_H */