trace.h

#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/fs.h>
#include <asm/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>

/*
 * Function trace entry - function address and parent function address:
 */
struct ftrace_entry {
	unsigned long		ip;
	unsigned long		parent_ip;
};

/*
 * Context switch trace entry - which task (and prio) we switched from/to:
 */
struct ctx_switch_entry {
	unsigned int		prev_pid;
	unsigned char		prev_prio;
	unsigned char		prev_state;
	unsigned int		next_pid;
	unsigned char		next_prio;
};

/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
	char			type;
	char			cpu;
	char			flags;
	char			preempt_count;
	int			pid;
	cycle_t			t;
	unsigned long		idx;
	union {
		struct ftrace_entry		fn;
		struct ctx_switch_entry		ctx;
	};
};

#define TRACE_ENTRY_SIZE	sizeof(struct trace_entry)
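
/*
 * Illustrative sketch only (not part of the original header): how the
 * anonymous union above can be filled in for a function-trace record.
 * The helper name is an assumption; the real entry reserve/commit path
 * lives in kernel/trace/trace.c, which also fills in type, cpu, flags
 * and the timestamp.
 */
static inline void example_fill_fn_entry(struct trace_entry *entry,
					 unsigned long ip,
					 unsigned long parent_ip)
{
	entry->fn.ip		= ip;		/* traced function address */
	entry->fn.parent_ip	= parent_ip;	/* its caller's address */
}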

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data (for example, which task started
 * the trace, etc.):
 */
struct trace_array_cpu {
	struct list_head	trace_pages;
	atomic_t		disabled;
	cycle_t			time_offset;

	/* these fields get copied into max-trace: */
	unsigned		trace_head_idx;
	unsigned		trace_tail_idx;
	void			*trace_head;	/* producer */
	void			*trace_tail;	/* consumer */
	unsigned long		trace_idx;
	unsigned long		saved_latency;
	unsigned long		critical_start;
	unsigned long		critical_end;
	unsigned long		critical_sequence;
	unsigned long		nice;
	unsigned long		policy;
	unsigned long		rt_priority;
	cycle_t			preempt_timestamp;
	pid_t			pid;
	uid_t			uid;
	char			comm[TASK_COMM_LEN];
};

struct trace_iterator;

/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well:
 */
struct trace_array {
	unsigned long		entries;
	long			ctrl;
	int			cpu;
	cycle_t			time_start;
	struct trace_array_cpu	*data[NR_CPUS];
};

/*
 * A specific tracer, represented by methods that operate on a trace array:
 */
struct tracer {
	const char		*name;
	void			(*init)(struct trace_array *tr);
	void			(*reset)(struct trace_array *tr);
	void			(*open)(struct trace_iterator *iter);
	void			(*close)(struct trace_iterator *iter);
	void			(*start)(struct trace_iterator *iter);
	void			(*stop)(struct trace_iterator *iter);
	void			(*ctrl_update)(struct trace_array *tr);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int			(*selftest)(struct tracer *trace,
					    struct trace_array *tr);
#endif
	struct tracer		*next;
	int			print_max;
};
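
/*
 * Illustrative sketch only (not part of the original header): the shape
 * of a minimal tracer built on the struct above. The callback bodies
 * and the "example" names are assumptions; real tracers live under
 * kernel/trace/ and hand such a struct to register_tracer(), declared
 * further down in this file, typically from an __init function.
 */
static void example_trace_init(struct trace_array *tr)
{
	/* a real init would reset the per-cpu buffers and start tracing */
}

static void example_trace_reset(struct trace_array *tr)
{
	/* a real reset would stop tracing and release any held state */
}

static struct tracer example_tracer = {
	.name	= "example",
	.init	= example_trace_init,
	.reset	= example_trace_reset,
};
/* registered with: register_tracer(&example_tracer); */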

/*
 * Trace iterator - used by printout routines that present trace
 * results to users, and which might sleep, etc.:
 */
struct trace_iterator {
	struct trace_array	*tr;
	struct tracer		*trace;
	struct trace_entry	*ent;
	int			cpu;

	struct trace_entry	*prev_ent;
	int			prev_cpu;

	unsigned long		iter_flags;
	loff_t			pos;
	unsigned long		next_idx[NR_CPUS];
	struct list_head	*next_page[NR_CPUS];
	unsigned		next_page_idx[NR_CPUS];
	long			idx;
};

void notrace tracing_reset(struct trace_array_cpu *data);
int tracing_open_generic(struct inode *inode, struct file *filp);
struct dentry *tracing_init_dentry(void);
void ftrace(struct trace_array *tr,
	    struct trace_array_cpu *data,
	    unsigned long ip,
	    unsigned long parent_ip,
	    unsigned long flags);
void tracing_sched_switch_trace(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct task_struct *prev,
				struct task_struct *next,
				unsigned long flags);
void tracing_record_cmdline(struct task_struct *tsk);
void tracing_start_function_trace(void);
void tracing_stop_function_trace(void);
int register_tracer(struct tracer *type);
void unregister_tracer(struct tracer *type);

extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_max_latency;
extern unsigned long tracing_thresh;

void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);

static inline notrace cycle_t now(int cpu)
{
	return cpu_clock(cpu);
}

#ifdef CONFIG_SCHED_TRACER
extern void notrace
wakeup_sched_switch(struct task_struct *prev, struct task_struct *next);
#else
static inline void
wakeup_sched_switch(struct task_struct *prev, struct task_struct *next)
{
}
#endif

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
typedef void
(*tracer_switch_func_t)(void *private,
			struct task_struct *prev,
			struct task_struct *next);

struct tracer_switch_ops {
	tracer_switch_func_t	func;
	void			*private;
	struct tracer_switch_ops *next;
};

extern int register_tracer_switch(struct tracer_switch_ops *ops);
extern int unregister_tracer_switch(struct tracer_switch_ops *ops);
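
/*
 * Illustrative sketch only (not part of the original header): hooking
 * the context-switch notifier chain declared above. The callback body
 * and the "example" names are assumptions.
 */
static void example_switch_func(void *private,
				struct task_struct *prev,
				struct task_struct *next)
{
	/* a real callback would record prev/next into a trace buffer */
}

static struct tracer_switch_ops example_switch_ops = {
	.func		= example_switch_func,
	.private	= NULL,
};
/* registered with: register_tracer_switch(&example_switch_ops); */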
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
#endif

#ifdef CONFIG_FTRACE_STARTUP_TEST
#ifdef CONFIG_FTRACE
extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
#endif
#ifdef CONFIG_IRQSOFF_TRACER
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
#endif
#ifdef CONFIG_PREEMPT_TRACER
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
#endif
#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
#endif
#ifdef CONFIG_SCHED_TRACER
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
#endif
#ifdef CONFIG_CONTEXT_SWITCH_TRACER
extern int trace_selftest_startup_sched_switch(struct tracer *trace,
					       struct trace_array *tr);
#endif
#endif /* CONFIG_FTRACE_STARTUP_TEST */

extern void *head_page(struct trace_array_cpu *data);

#endif /* _LINUX_KERNEL_TRACE_H */