#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/fs.h>
#include <asm/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <trace/boot.h>
#include <linux/kmemtrace.h>
#include <linux/hw_breakpoint.h>
#include <linux/trace_seq.h>
#include <linux/ftrace_event.h>
enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,
	TRACE_WAKE,
	TRACE_STACK,
	TRACE_PRINT,
	TRACE_BPRINT,
	TRACE_SPECIAL,
	TRACE_MMIO_RW,
	TRACE_MMIO_MAP,
	TRACE_BRANCH,
	TRACE_BOOT_CALL,
	TRACE_BOOT_RET,
	TRACE_GRAPH_RET,
	TRACE_GRAPH_ENT,
	TRACE_USER_STACK,
	TRACE_HW_BRANCHES,
	TRACE_KMEM_ALLOC,
	TRACE_KMEM_FREE,
	TRACE_BLK,
	TRACE_KSYM,

	__TRACE_LAST_TYPE,
};

enum kmemtrace_type_id {
	KMEMTRACE_TYPE_KMALLOC = 0,	/* kmalloc() or kfree(). */
	KMEMTRACE_TYPE_CACHE,		/* kmem_cache_*(). */
	KMEMTRACE_TYPE_PAGES,		/* __get_free_pages() and friends. */
};
extern struct tracer boot_tracer;

#undef __field
#define __field(type, item)		type	item;

#undef __field_struct
#define __field_struct(type, item)	__field(type, item)

#undef __field_desc
#define __field_desc(type, container, item)

#undef __array
#define __array(type, item, size)	type	item[size];

#undef __array_desc
#define __array_desc(type, container, item, size)

#undef __dynamic_array
#define __dynamic_array(type, item)	type	item[];

#undef F_STRUCT
#define F_STRUCT(args...)		args

#undef FTRACE_ENTRY
#define FTRACE_ENTRY(name, struct_name, id, tstruct, print)	\
	struct struct_name {					\
		struct trace_entry	ent;			\
		tstruct						\
	}

#undef TP_ARGS
#define TP_ARGS(args...)	args

#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk)

#include "trace_entries.h"
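
/*
 * Illustrative sketch of what the include above produces: an entry in
 * trace_entries.h written roughly like the (abridged, possibly inexact)
 * fragment below
 *
 *	FTRACE_ENTRY(function, ftrace_entry, TRACE_FN,
 *		F_STRUCT(
 *			__field(unsigned long, ip)
 *			__field(unsigned long, parent_ip)
 *		),
 *		...
 *	);
 *
 * is expanded by this header's FTRACE_ENTRY definition into a plain
 * struct carrying the common trace_entry header:
 *
 *	struct ftrace_entry {
 *		struct trace_entry	ent;
 *		unsigned long		ip;
 *		unsigned long		parent_ip;
 *	};
 */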
/*
 * syscalls are special, and need special handling; this is why
 * they are not included in trace_entries.h
 */
struct syscall_trace_enter {
	struct trace_entry	ent;
	int			nr;
	unsigned long		args[];
};

struct syscall_trace_exit {
	struct trace_entry	ent;
	int			nr;
	long			ret;
};

struct kprobe_trace_entry {
	struct trace_entry	ent;
	unsigned long		ip;
	int			nargs;
	unsigned long		args[];
};

#define SIZEOF_KPROBE_TRACE_ENTRY(n)			\
	(offsetof(struct kprobe_trace_entry, args) +	\
	(sizeof(unsigned long) * (n)))

struct kretprobe_trace_entry {
	struct trace_entry	ent;
	unsigned long		func;
	unsigned long		ret_ip;
	int			nargs;
	unsigned long		args[];
};

#define SIZEOF_KRETPROBE_TRACE_ENTRY(n)			\
	(offsetof(struct kretprobe_trace_entry, args) +	\
	(sizeof(unsigned long) * (n)))
/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF	 - interrupts were disabled
 *  IRQS_NOSUPPORT - arch does not support irqs_disabled_flags
 *  NEED_RESCHED - reschedule is requested
 *  HARDIRQ	 - inside an interrupt handler
 *  SOFTIRQ	 - inside a softirq handler
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
};
#define TRACE_BUF_SIZE		1024

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data (for example, which task started
 * the trace, etc.)
 */
struct trace_array_cpu {
	atomic_t		disabled;
	void			*buffer_page;	/* ring buffer spare */

	unsigned long		saved_latency;
	unsigned long		critical_start;
	unsigned long		critical_end;
	unsigned long		critical_sequence;
	unsigned long		nice;
	unsigned long		policy;
	unsigned long		rt_priority;
	unsigned long		skipped_entries;
	cycle_t			preempt_timestamp;
	pid_t			pid;
	uid_t			uid;
	char			comm[TASK_COMM_LEN];
};
/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well:
 */
struct trace_array {
	struct ring_buffer	*buffer;
	unsigned long		entries;
	int			cpu;
	cycle_t			time_start;
	struct task_struct	*waiter;
	struct trace_array_cpu	*data[NR_CPUS];
};
#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id)		\
	if (FTRACE_CMP_TYPE(var, etype)) {		\
		var = (typeof(var))(entry);		\
		WARN_ON(id && (entry)->type != id);	\
		break;					\
	}

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);

/*
 * The trace_assign_type is a verifier that the entry type is
 * the same as the type being assigned. To add new types simply
 * add a line with the following format:
 *
 * IF_ASSIGN(var, ent, type, id);
 *
 * Where "type" is the trace type that includes the trace_entry
 * as the "ent" item. And "id" is the trace identifier that is
 * used in the trace_type enum.
 *
 * If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT);\
		IF_ASSIGN(var, ent, struct special_entry, 0);		\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_boot_call, TRACE_BOOT_CALL);\
		IF_ASSIGN(var, ent, struct trace_boot_ret, TRACE_BOOT_RET);\
		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
			  TRACE_GRAPH_ENT);				\
		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
			  TRACE_GRAPH_RET);				\
		IF_ASSIGN(var, ent, struct hw_branch_entry, TRACE_HW_BRANCHES);\
		IF_ASSIGN(var, ent, struct kmemtrace_alloc_entry,	\
			  TRACE_KMEM_ALLOC);				\
		IF_ASSIGN(var, ent, struct kmemtrace_free_entry,	\
			  TRACE_KMEM_FREE);				\
		IF_ASSIGN(var, ent, struct ksym_trace_entry, TRACE_KSYM);\
		__ftrace_bad_type();					\
	} while (0)
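
/*
 * Usage sketch (illustrative only): an output routine typically pulls a
 * typed entry out of an iterator with trace_assign_type() and then
 * formats it; "field", "iter" and "s" below are hypothetical locals.
 *
 *	struct ftrace_entry *field;
 *
 *	trace_assign_type(field, iter->ent);
 *	trace_seq_printf(s, "%lx <-- %lx\n", field->ip, field->parent_ip);
 */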
/*
 * An option specific to a tracer. This is a boolean value.
 * The bit is the bit index that sets its value on the
 * flags value in struct tracer_flags.
 */
struct tracer_opt {
	const char	*name; /* Will appear on the trace_options file */
	u32		bit; /* Mask assigned in val field in tracer_flags */
};

/*
 * The set of specific options for a tracer. Your tracer
 * has to set the initial value of the flags val.
 */
struct tracer_flags {
	u32			val;
	struct tracer_opt	*opts;
};

/* Makes it easier to define a tracer opt */
#define TRACER_OPT(s, b)	.name = #s, .bit = b
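
/*
 * Example (illustrative sketch): a tracer exposing one private option
 * bit named "verbose" through the trace_options file.  The identifiers
 * my_opts/my_flags and the bit value are made up for the example; the
 * empty entry terminates the option array.
 *
 *	static struct tracer_opt my_opts[] = {
 *		{ TRACER_OPT(verbose, 0x1) },
 *		{ }
 *	};
 *
 *	static struct tracer_flags my_flags = {
 *		.val  = 0,
 *		.opts = my_opts,
 *	};
 */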
/**
 * struct tracer - a specific tracer and its callbacks to interact with debugfs
 * @name: the name chosen to select it on the available_tracers file
 * @init: called when one switches to this tracer (echo name > current_tracer)
 * @reset: called when one switches to another tracer
 * @start: called when tracing is unpaused (echo 1 > tracing_enabled)
 * @stop: called when tracing is paused (echo 0 > tracing_enabled)
 * @open: called when the trace file is opened
 * @pipe_open: called when the trace_pipe file is opened
 * @wait_pipe: override how the user waits for traces on trace_pipe
 * @close: called when the trace file is released
 * @pipe_close: called when the trace_pipe file is released
 * @read: override the default read callback on trace_pipe
 * @splice_read: override the default splice_read callback on trace_pipe
 * @selftest: selftest to run on boot (see trace_selftest.c)
 * @print_header: override the first lines that describe your columns
 * @print_line: callback that prints a trace
 * @set_flag: signals one of your private flags changed (trace_options file)
 * @flags: your private flags
 */
struct tracer {
	const char		*name;
	int			(*init)(struct trace_array *tr);
	void			(*reset)(struct trace_array *tr);
	void			(*start)(struct trace_array *tr);
	void			(*stop)(struct trace_array *tr);
	void			(*open)(struct trace_iterator *iter);
	void			(*pipe_open)(struct trace_iterator *iter);
	void			(*wait_pipe)(struct trace_iterator *iter);
	void			(*close)(struct trace_iterator *iter);
	void			(*pipe_close)(struct trace_iterator *iter);
	ssize_t			(*read)(struct trace_iterator *iter,
					struct file *filp, char __user *ubuf,
					size_t cnt, loff_t *ppos);
	ssize_t			(*splice_read)(struct trace_iterator *iter,
					       struct file *filp,
					       loff_t *ppos,
					       struct pipe_inode_info *pipe,
					       size_t len,
					       unsigned int flags);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int			(*selftest)(struct tracer *trace,
					    struct trace_array *tr);
#endif
	void			(*print_header)(struct seq_file *m);
	enum print_line_t	(*print_line)(struct trace_iterator *iter);
	/* If you handled the flag setting, return 0 */
	int			(*set_flag)(u32 old_flags, u32 bit, int set);
	struct tracer		*next;
	int			print_max;
	struct tracer_flags	*flags;
};
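
/*
 * Minimal tracer sketch (illustrative): only enough callbacks to show
 * the shape; my_tracer_init, my_tracer_reset, my_tracer and the
 * "mytracer" name are hypothetical.  A tracer built like this is made
 * selectable via register_tracer(&my_tracer), declared further below.
 *
 *	static int my_tracer_init(struct trace_array *tr)
 *	{
 *		return 0;
 *	}
 *
 *	static void my_tracer_reset(struct trace_array *tr)
 *	{
 *	}
 *
 *	static struct tracer my_tracer = {
 *		.name	= "mytracer",
 *		.init	= my_tracer_init,
 *		.reset	= my_tracer_reset,
 *	};
 */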
#define TRACE_PIPE_ALL_CPU	-1

int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
void trace_wake_up(void);
void tracing_reset(struct trace_array *tr, int cpu);
void tracing_reset_online_cpus(struct trace_array *tr);
void tracing_reset_current(int cpu);
void tracing_reset_current_online_cpus(void);
int tracing_open_generic(struct inode *inode, struct file *filp);
struct dentry *trace_create_file(const char *name,
				 mode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops);

struct dentry *tracing_init_dentry(void);
void init_tracer_sysprof_debugfs(struct dentry *d_tracer);

struct ring_buffer_event;
struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags,
			  int pc);
void trace_buffer_unlock_commit(struct ring_buffer *buffer,
				struct ring_buffer_event *event,
				unsigned long flags, int pc);

struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
					    struct trace_array_cpu *data);

struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts);

void default_wait_pipe(struct trace_iterator *iter);
void poll_wait_pipe(struct trace_iterator *iter);

void ftrace(struct trace_array *tr,
	    struct trace_array_cpu *data,
	    unsigned long ip,
	    unsigned long parent_ip,
	    unsigned long flags, int pc);
void tracing_sched_switch_trace(struct trace_array *tr,
				struct task_struct *prev,
				struct task_struct *next,
				unsigned long flags, int pc);
void tracing_sched_wakeup_trace(struct trace_array *tr,
				struct task_struct *wakee,
				struct task_struct *cur,
				unsigned long flags, int pc);
void trace_special(struct trace_array *tr,
		   struct trace_array_cpu *data,
		   unsigned long arg1,
		   unsigned long arg2,
		   unsigned long arg3, int pc);
void trace_function(struct trace_array *tr,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);

void trace_graph_return(struct ftrace_graph_ret *trace);
int trace_graph_entry(struct ftrace_graph_ent *trace);
void set_graph_array(struct trace_array *tr);

void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
void tracing_sched_switch_assign_trace(struct trace_array *tr);
void tracing_stop_sched_switch_record(void);
void tracing_start_sched_switch_record(void);
int register_tracer(struct tracer *type);
void unregister_tracer(struct tracer *type);
int is_tracing_stopped(void);

extern int process_new_ksym_entry(char *ksymname, int op, unsigned long addr);

extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_thresh;
#ifdef CONFIG_TRACER_MAX_TRACE
extern unsigned long tracing_max_latency;

void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);
#endif /* CONFIG_TRACER_MAX_TRACE */
#ifdef CONFIG_STACKTRACE
void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
			int skip, int pc);

void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
			    int pc);

void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc);
#else
/* The stubs mirror the prototypes above so !CONFIG_STACKTRACE callers build. */
static inline void ftrace_trace_stack(struct ring_buffer *buffer,
				      unsigned long flags, int skip, int pc)
{
}

static inline void ftrace_trace_userstack(struct ring_buffer *buffer,
					  unsigned long flags, int pc)
{
}

static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
				 int skip, int pc)
{
}
#endif /* CONFIG_STACKTRACE */
extern cycle_t ftrace_now(int cpu);

extern void trace_find_cmdline(int pid, char comm[]);

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#endif

extern int ring_buffer_expanded;
extern bool tracing_selftest_disabled;
DECLARE_PER_CPU(int, ftrace_cpu_disabled);
#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
extern int trace_selftest_startup_function_graph(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
				      struct trace_array *tr);
extern int trace_selftest_startup_sched_switch(struct tracer *trace,
					       struct trace_array *tr);
extern int trace_selftest_startup_sysprof(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_branch(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_hw_branches(struct tracer *trace,
					      struct trace_array *tr);
extern int trace_selftest_startup_ksym(struct tracer *trace,
				       struct trace_array *tr);
#endif /* CONFIG_FTRACE_STARTUP_TEST */
extern void *head_page(struct trace_array_cpu *data);
extern unsigned long long ns2usecs(cycle_t nsec);
extern int
trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_vprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_array_vprintk(struct trace_array *tr,
		    unsigned long ip, const char *fmt, va_list args);
int trace_array_printk(struct trace_array *tr,
		       unsigned long ip, const char *fmt, ...);

extern unsigned long trace_flags;

extern int trace_clock_id;
/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern enum print_line_t print_graph_function(struct trace_iterator *iter);
extern enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);

#ifdef CONFIG_DYNAMIC_FTRACE
/* TODO: make this variable */
#define FTRACE_GRAPH_MAX_FUNCS		32
extern int ftrace_graph_filter_enabled;
extern int ftrace_graph_count;
extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];

static inline int ftrace_graph_addr(unsigned long addr)
{
	int i;

	if (!ftrace_graph_filter_enabled)
		return 1;

	for (i = 0; i < ftrace_graph_count; i++) {
		if (addr == ftrace_graph_funcs[i])
			return 1;
	}
	return 0;
}
#else
static inline int ftrace_graph_addr(unsigned long addr)
{
	return 1;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#else /* CONFIG_FUNCTION_GRAPH_TRACER */
static inline enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
extern struct list_head ftrace_pids;

#ifdef CONFIG_FUNCTION_TRACER
static inline int ftrace_trace_task(struct task_struct *task)
{
	if (list_empty(&ftrace_pids))
		return 1;

	return test_tsk_trace_trace(task);
}
#else
static inline int ftrace_trace_task(struct task_struct *task)
{
	return 1;
}
#endif
/*
 * struct trace_parser - serves for reading the user input separated by spaces
 * @cont: set if the input is not complete - no final space char was found
 * @buffer: holds the parsed user input
 * @idx: user input length
 * @size: buffer size
 */
struct trace_parser {
	bool		cont;
	char		*buffer;
	unsigned	idx;
	unsigned	size;
};

static inline bool trace_parser_loaded(struct trace_parser *parser)
{
	return (parser->idx != 0);
}

static inline bool trace_parser_cont(struct trace_parser *parser)
{
	return parser->cont;
}

static inline void trace_parser_clear(struct trace_parser *parser)
{
	parser->cont = false;
	parser->idx = 0;
}

extern int trace_parser_get_init(struct trace_parser *parser, int size);
extern void trace_parser_put(struct trace_parser *parser);
extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
			  size_t cnt, loff_t *ppos);
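
/*
 * Usage sketch (illustrative): a debugfs write handler reading one
 * space-separated token from user space.  The buffer size (128), the
 * locals and the use_token() consumer are made up for the example;
 * ubuf/cnt/ppos are the usual write() arguments.
 *
 *	struct trace_parser parser;
 *	int read;
 *
 *	if (trace_parser_get_init(&parser, 128))
 *		return -ENOMEM;
 *
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser))
 *		use_token(parser.buffer);
 *
 *	trace_parser_put(&parser);
 */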
/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that control the output.
 *
 * NOTE: These bits must match the trace_options array in
 *       trace.c.
 */
enum trace_iterator_flags {
	TRACE_ITER_PRINT_PARENT		= 0x01,
	TRACE_ITER_SYM_OFFSET		= 0x02,
	TRACE_ITER_SYM_ADDR		= 0x04,
	TRACE_ITER_VERBOSE		= 0x08,
	TRACE_ITER_RAW			= 0x10,
	TRACE_ITER_HEX			= 0x20,
	TRACE_ITER_BIN			= 0x40,
	TRACE_ITER_BLOCK		= 0x80,
	TRACE_ITER_STACKTRACE		= 0x100,
	TRACE_ITER_PRINTK		= 0x200,
	TRACE_ITER_PREEMPTONLY		= 0x400,
	TRACE_ITER_BRANCH		= 0x800,
	TRACE_ITER_ANNOTATE		= 0x1000,
	TRACE_ITER_USERSTACKTRACE	= 0x2000,
	TRACE_ITER_SYM_USEROBJ		= 0x4000,
	TRACE_ITER_PRINTK_MSGONLY	= 0x8000,
	TRACE_ITER_CONTEXT_INFO		= 0x10000, /* Print pid/cpu/time */
	TRACE_ITER_LATENCY_FMT		= 0x20000,
	TRACE_ITER_SLEEP_TIME		= 0x40000,
	TRACE_ITER_GRAPH_TIME		= 0x80000,
};

/*
 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 * control the output of kernel symbols.
 */
#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

extern struct tracer nop_trace;
/**
 * ftrace_preempt_disable - disable preemption scheduler safe
 *
 * When tracing can happen inside the scheduler, there exist
 * cases where the tracing might happen before the need_resched
 * flag is checked. If this happens and the tracer calls
 * preempt_enable (after a disable), a schedule might take place
 * causing an infinite recursion.
 *
 * To prevent this, we read the need_resched flag before
 * disabling preemption. When we want to enable preemption we
 * check the flag, if it is set, then we call preempt_enable_no_resched.
 * Otherwise, we call preempt_enable.
 *
 * The rationale for doing the above is that if need_resched is set
 * and we have yet to reschedule, we are either in an atomic location
 * (where we do not need to check for scheduling) or we are inside
 * the scheduler and do not want to resched.
 */
static inline int ftrace_preempt_disable(void)
{
	int resched;

	resched = need_resched();
	preempt_disable_notrace();

	return resched;
}

/**
 * ftrace_preempt_enable - enable preemption scheduler safe
 * @resched: the return value from ftrace_preempt_disable
 *
 * This is a scheduler safe way to enable preemption and not miss
 * any preemption checks. The @resched value saved by
 * ftrace_preempt_disable() records whether need_resched was set
 * when preemption was disabled. If resched is set, then we are
 * either inside an atomic or are inside the scheduler (we would
 * have already scheduled otherwise). In this case, we do not want
 * to call normal preempt_enable, but preempt_enable_no_resched
 * instead.
 */
static inline void ftrace_preempt_enable(int resched)
{
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}
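
/*
 * Usage sketch (illustrative): the pair above is meant to bracket trace
 * code that may run from within the scheduler.
 *
 *	int resched;
 *
 *	resched = ftrace_preempt_disable();
 *	(... record the trace entry ...)
 *	ftrace_preempt_enable(resched);
 */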
#ifdef CONFIG_BRANCH_TRACER
extern int enable_branch_tracing(struct trace_array *tr);
extern void disable_branch_tracing(void);
static inline int trace_branch_enable(struct trace_array *tr)
{
	if (trace_flags & TRACE_ITER_BRANCH)
		return enable_branch_tracing(tr);
	return 0;
}
static inline void trace_branch_disable(void)
{
	/* due to races, always disable */
	disable_branch_tracing();
}
#else
static inline int trace_branch_enable(struct trace_array *tr)
{
	return 0;
}
static inline void trace_branch_disable(void)
{
}
#endif /* CONFIG_BRANCH_TRACER */

/* set ring buffers to default size if not already done so */
int tracing_update_buffers(void);

/* trace event type bit fields, not numeric */
enum {
	TRACE_EVENT_TYPE_PRINTF		= 1,
	TRACE_EVENT_TYPE_RAW		= 2,
};
struct ftrace_event_field {
	struct list_head	link;
	char			*name;
	char			*type;
	int			filter_type;
	int			offset;
	int			size;
	int			is_signed;
};

struct event_filter {
	int			n_preds;
	struct filter_pred	**preds;
	char			*filter_string;
};

struct event_subsystem {
	struct list_head	list;
	const char		*name;
	struct dentry		*entry;
	struct event_filter	*filter;
	int			nr_events;
};

struct filter_pred;
struct regex;

typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event,
				 int val1, int val2);

typedef int (*regex_match_func)(char *str, struct regex *r, int len);

enum regex_type {
	MATCH_FULL = 0,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

struct regex {
	char			pattern[MAX_FILTER_STR_VAL];
	int			len;
	int			field_len;
	regex_match_func	match;
};

struct filter_pred {
	filter_pred_fn_t	fn;
	u64			val;
	struct regex		regex;
	char			*field_name;
	int			offset;
	int			not;
	int			op;
	int			pop_n;
};
extern enum regex_type
filter_parse_regex(char *buff, int len, char **search, int *not);
extern void print_event_filter(struct ftrace_event_call *call,
			       struct trace_seq *s);
extern int apply_event_filter(struct ftrace_event_call *call,
			      char *filter_string);
extern int apply_subsystem_event_filter(struct event_subsystem *system,
					char *filter_string);
extern void print_subsystem_event_filter(struct event_subsystem *system,
					 struct trace_seq *s);
extern int filter_assign_type(const char *type);

static inline int
filter_check_discard(struct ftrace_event_call *call, void *rec,
		     struct ring_buffer *buffer,
		     struct ring_buffer_event *event)
{
	if (unlikely(call->filter_active) &&
	    !filter_match_preds(call->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
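
/*
 * Usage sketch (illustrative): an event probe commits the reserved ring
 * buffer entry only when the attached filter does not reject it.  The
 * locals (call, entry, buffer, event, irq_flags, pc) are hypothetical.
 *
 *	if (!filter_check_discard(call, entry, buffer, event))
 *		trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
 */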
extern struct mutex event_mutex;
extern struct list_head ftrace_events;

extern const char *__start___trace_bprintk_fmt[];
extern const char *__stop___trace_bprintk_fmt[];

#undef FTRACE_ENTRY
#define FTRACE_ENTRY(call, struct_name, id, tstruct, print)		\
	extern struct ftrace_event_call					\
	__attribute__((__aligned__(4))) event_##call;
#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print)	\
	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
#include "trace_entries.h"

#endif /* _LINUX_KERNEL_TRACE_H */