ftrace_event.h

#ifndef _LINUX_FTRACE_EVENT_H
#define _LINUX_FTRACE_EVENT_H

#include <linux/trace_seq.h>
#include <linux/ring_buffer.h>

struct trace_array;
struct tracer;
struct dentry;

/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *	bash-15816 [01]   235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
	unsigned short		type;
	unsigned char		flags;
	unsigned char		preempt_count;
	int			pid;
	int			tgid;
};

#define FTRACE_MAX_EVENT						\
	((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1)
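
/*
 * Illustrative note: ->type above is an unsigned short, so sizeof() is 2 and
 * FTRACE_MAX_EVENT works out to (1 << 16) - 1 = 65535, the largest event
 * type id that fits in the trace_entry header.
 */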
/*
 * Trace iterator - used by printout routines that present trace
 * results to users and that might sleep, etc.:
 */
struct trace_iterator {
	struct trace_array	*tr;
	struct tracer		*trace;
	void			*private;
	int			cpu_file;
	struct mutex		mutex;
	struct ring_buffer_iter	*buffer_iter[NR_CPUS];

	/* The below is zeroed out in pipe_read */
	struct trace_seq	seq;
	struct trace_entry	*ent;
	int			cpu;
	u64			ts;

	unsigned long		iter_flags;
	loff_t			pos;
	long			idx;

	cpumask_var_t		started;
};

typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter,
					      int flags);

struct trace_event {
	struct hlist_node	node;
	struct list_head	list;
	int			type;
	trace_print_func	trace;
	trace_print_func	raw;
	trace_print_func	hex;
	trace_print_func	binary;
};

extern int register_ftrace_event(struct trace_event *event);
extern int unregister_ftrace_event(struct trace_event *event);

/* Return values for print_line callback */
enum print_line_t {
	TRACE_TYPE_PARTIAL_LINE	= 0,	/* Retry after flushing the seq */
	TRACE_TYPE_HANDLED	= 1,
	TRACE_TYPE_UNHANDLED	= 2,	/* Relay to other output functions */
	TRACE_TYPE_NO_CONSUME	= 3	/* Handled but ask to not consume */
};
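
/*
 * Minimal sketch of how a plugin could hook its text output into the event
 * machinery: format iter->ent into iter->seq from a trace_print_func and
 * register a struct trace_event carrying it.  The names my_entry,
 * my_trace_output and my_trace_event are hypothetical, not part of this
 * header; only the types and calls they use are.
 */
struct my_entry {
	struct trace_entry	ent;	/* common header must come first */
	unsigned long		value;
};

static enum print_line_t my_trace_output(struct trace_iterator *iter, int flags)
{
	struct my_entry *field = (struct my_entry *)iter->ent;

	/* trace_seq_printf() returns 0 when the seq buffer is full */
	if (!trace_seq_printf(&iter->seq, "my_event: value=%lu\n", field->value))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static struct trace_event my_trace_event = {
	/* ->type left 0 so the core can assign an id at registration */
	.trace	= my_trace_output,
};

/* somewhere in init code: register_ftrace_event(&my_trace_event); */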
struct ring_buffer_event *
trace_current_buffer_lock_reserve(int type, unsigned long len,
				  unsigned long flags, int pc);
void trace_current_buffer_unlock_commit(struct ring_buffer_event *event,
					unsigned long flags, int pc);
void trace_nowake_buffer_unlock_commit(struct ring_buffer_event *event,
				       unsigned long flags, int pc);
void trace_current_buffer_discard_commit(struct ring_buffer_event *event);

void tracing_record_cmdline(struct task_struct *tsk);
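
/*
 * Sketch of the reserve/fill/commit sequence the declarations above support,
 * reusing the hypothetical my_entry/my_trace_event names from the sketch
 * higher up.  local_save_flags() and preempt_count() come from the usual
 * kernel headers; ring_buffer_event_data() is declared in
 * <linux/ring_buffer.h>, which is already included here.
 */
static void my_trace_record(unsigned long value)
{
	struct ring_buffer_event *event;
	struct my_entry *entry;
	unsigned long irq_flags;
	int pc = preempt_count();

	local_save_flags(irq_flags);
	event = trace_current_buffer_lock_reserve(my_trace_event.type,
						  sizeof(*entry), irq_flags, pc);
	if (!event)
		return;		/* buffer full or tracing disabled */

	entry = ring_buffer_event_data(event);
	entry->value = value;

	trace_current_buffer_unlock_commit(event, irq_flags, pc);
}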
struct ftrace_event_call {
	struct list_head	list;
	char			*name;
	char			*system;
	struct dentry		*dir;
	struct trace_event	*event;
	int			enabled;
	int			(*regfunc)(void);
	void			(*unregfunc)(void);
	int			id;
	int			(*raw_init)(void);
	int			(*show_format)(struct trace_seq *s);
	int			(*define_fields)(void);
	struct list_head	fields;
	int			filter_active;
	void			*filter;
	void			*mod;

#ifdef CONFIG_EVENT_PROFILE
	atomic_t	profile_count;
	int		(*profile_enable)(struct ftrace_event_call *);
	void		(*profile_disable)(struct ftrace_event_call *);
#endif
};
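
/*
 * Sketch of an ftrace_event_call instance, roughly what the event macros
 * generate for each event.  The my_* names are hypothetical; regfunc and
 * unregfunc would normally attach and detach the tracepoint probe, while
 * ->id, ->dir and ->fields are filled in by the tracing core.
 */
static int my_event_reg(void)
{
	/* attach the tracepoint probe here */
	return 0;
}

static void my_event_unreg(void)
{
	/* detach the tracepoint probe here */
}

static struct ftrace_event_call my_event_call = {
	.name		= "my_event",
	.system		= "my_subsystem",
	.event		= &my_trace_event,	/* from the sketch above */
	.regfunc	= my_event_reg,
	.unregfunc	= my_event_unreg,
};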
#define MAX_FILTER_PRED		8
#define MAX_FILTER_STR_VAL	128

extern int init_preds(struct ftrace_event_call *call);
extern int filter_match_preds(struct ftrace_event_call *call, void *rec);
extern int filter_current_check_discard(struct ftrace_event_call *call,
					void *rec,
					struct ring_buffer_event *event);

extern int trace_define_field(struct ftrace_event_call *call, char *type,
			      char *name, int offset, int size);
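
/*
 * Sketch of how the filtering hooks above fit together, again with the
 * hypothetical my_* names: a define_fields() style callback describes each
 * record field via trace_define_field() so filter expressions can be parsed
 * against it, and the record path can let filter_current_check_discard()
 * drop a just-reserved event before it is committed, e.g.:
 *
 *	if (!filter_current_check_discard(&my_event_call, entry, event))
 *		trace_current_buffer_unlock_commit(event, irq_flags, pc);
 */
static int my_event_define_fields(void)
{
	struct my_entry field;
	int ret;

	ret = trace_define_field(&my_event_call, "unsigned long", "value",
				 offsetof(struct my_entry, value),
				 sizeof(field.value));
	return ret;
}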
/*
 * The double __builtin_constant_p is needed because gcc would otherwise
 * error out on initializing the static variable with fmt when fmt is not a
 * constant, even though the outer if statement is optimized away in that
 * case.
 */
#define event_trace_printk(ip, fmt, args...)				\
do {									\
	__trace_printk_check_format(fmt, ##args);			\
	tracing_record_cmdline(current);				\
	if (__builtin_constant_p(fmt)) {				\
		static const char *trace_printk_fmt			\
		  __attribute__((section("__trace_printk_fmt"))) =	\
			__builtin_constant_p(fmt) ? fmt : NULL;		\
									\
		__trace_bprintk(ip, trace_printk_fmt, ##args);		\
	} else								\
		__trace_printk(ip, fmt, ##args);			\
} while (0)
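
/*
 * Usage sketch for event_trace_printk(); my_debug_hook is hypothetical.
 * Because the format string is a compile-time constant, the macro takes the
 * __trace_bprintk() branch, placing the format in the __trace_printk_fmt
 * section and recording only a pointer to it plus the arguments.  _THIS_IP_
 * is the usual helper from <linux/kernel.h>.
 */
static inline void my_debug_hook(int cpu, u64 ts)
{
	event_trace_printk(_THIS_IP_, "cpu=%d ts=%llu\n",
			   cpu, (unsigned long long)ts);
}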
#define __common_field(type, item)					\
	ret = trace_define_field(event_call, #type, "common_" #item,	\
				 offsetof(typeof(field.ent), item),	\
				 sizeof(field.ent.item));		\
	if (ret)							\
		return ret;
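
/*
 * Sketch of the context __common_field() expects: it is written to be
 * expanded inside a define_fields() style function that already provides
 * `ret', `event_call' and a `field' variable whose first member `ent' is the
 * struct trace_entry header.  The my_* names are the hypothetical ones from
 * the sketches above.
 */
static int my_event_define_common_fields(void)
{
	struct ftrace_event_call *event_call = &my_event_call;
	struct my_entry field;
	int ret;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	__common_field(int, pid);

	return ret;
}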

#endif /* _LINUX_FTRACE_EVENT_H */