#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/fs.h>
#include <asm/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/ftrace.h>
#include <trace/boot.h>
#include <trace/kmemtrace.h>
#include <trace/power.h>

enum trace_type {
        __TRACE_FIRST_TYPE = 0,

        TRACE_FN,
        TRACE_CTX,
        TRACE_WAKE,
        TRACE_STACK,
        TRACE_PRINT,
        TRACE_BPRINT,
        TRACE_SPECIAL,
        TRACE_MMIO_RW,
        TRACE_MMIO_MAP,
        TRACE_BRANCH,
        TRACE_BOOT_CALL,
        TRACE_BOOT_RET,
        TRACE_GRAPH_RET,
        TRACE_GRAPH_ENT,
        TRACE_USER_STACK,
        TRACE_HW_BRANCHES,
        TRACE_SYSCALL_ENTER,
        TRACE_SYSCALL_EXIT,
        TRACE_KMEM_ALLOC,
        TRACE_KMEM_FREE,
        TRACE_POWER,
        TRACE_BLK,

        __TRACE_LAST_TYPE,
};
/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
        unsigned char type;
        unsigned char flags;
        unsigned char preempt_count;
        int pid;
        int tgid;
};
/*
 * Function trace entry - function address and parent function address:
 */
struct ftrace_entry {
        struct trace_entry ent;
        unsigned long ip;
        unsigned long parent_ip;
};
/* Function call entry */
struct ftrace_graph_ent_entry {
        struct trace_entry ent;
        struct ftrace_graph_ent graph_ent;
};

/* Function return entry */
struct ftrace_graph_ret_entry {
        struct trace_entry ent;
        struct ftrace_graph_ret ret;
};

extern struct tracer boot_tracer;
/*
 * Context switch trace entry - which task (and prio) we switched from/to:
 */
struct ctx_switch_entry {
        struct trace_entry ent;
        unsigned int prev_pid;
        unsigned char prev_prio;
        unsigned char prev_state;
        unsigned int next_pid;
        unsigned char next_prio;
        unsigned char next_state;
        unsigned int next_cpu;
};

/*
 * Special (free-form) trace entry:
 */
struct special_entry {
        struct trace_entry ent;
        unsigned long arg1;
        unsigned long arg2;
        unsigned long arg3;
};

/*
 * Stack-trace entry:
 */
#define FTRACE_STACK_ENTRIES 8

struct stack_entry {
        struct trace_entry ent;
        unsigned long caller[FTRACE_STACK_ENTRIES];
};

struct userstack_entry {
        struct trace_entry ent;
        unsigned long caller[FTRACE_STACK_ENTRIES];
};
/*
 * trace_printk entry:
 */
struct bprint_entry {
        struct trace_entry ent;
        unsigned long ip;
        const char *fmt;
        u32 buf[];
};

struct print_entry {
        struct trace_entry ent;
        unsigned long ip;
        char buf[];
};

#define TRACE_OLD_SIZE 88

struct trace_field_cont {
        unsigned char type;
        /* Temporary till we get rid of this completely */
        char buf[TRACE_OLD_SIZE - 1];
};
struct trace_mmiotrace_rw {
        struct trace_entry ent;
        struct mmiotrace_rw rw;
};

struct trace_mmiotrace_map {
        struct trace_entry ent;
        struct mmiotrace_map map;
};

struct trace_boot_call {
        struct trace_entry ent;
        struct boot_trace_call boot_call;
};

struct trace_boot_ret {
        struct trace_entry ent;
        struct boot_trace_ret boot_ret;
};

#define TRACE_FUNC_SIZE 30
#define TRACE_FILE_SIZE 20
struct trace_branch {
        struct trace_entry ent;
        unsigned line;
        char func[TRACE_FUNC_SIZE+1];
        char file[TRACE_FILE_SIZE+1];
        char correct;
};

struct hw_branch_entry {
        struct trace_entry ent;
        u64 from;
        u64 to;
};

struct trace_power {
        struct trace_entry ent;
        struct power_trace state_data;
};

enum kmemtrace_type_id {
        KMEMTRACE_TYPE_KMALLOC = 0,     /* kmalloc() or kfree(). */
        KMEMTRACE_TYPE_CACHE,           /* kmem_cache_*(). */
        KMEMTRACE_TYPE_PAGES,           /* __get_free_pages() and friends. */
};
struct kmemtrace_alloc_entry {
        struct trace_entry ent;
        enum kmemtrace_type_id type_id;
        unsigned long call_site;
        const void *ptr;
        size_t bytes_req;
        size_t bytes_alloc;
        gfp_t gfp_flags;
        int node;
};

struct kmemtrace_free_entry {
        struct trace_entry ent;
        enum kmemtrace_type_id type_id;
        unsigned long call_site;
        const void *ptr;
};

struct syscall_trace_enter {
        struct trace_entry ent;
        int nr;
        unsigned long args[];
};

struct syscall_trace_exit {
        struct trace_entry ent;
        int nr;
        unsigned long ret;
};
/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF       - interrupts were disabled
 *  IRQS_NOSUPPORT - arch does not support irqs_disabled_flags
 *  NEED_RESCHED   - reschedule is requested
 *  HARDIRQ        - inside an interrupt handler
 *  SOFTIRQ        - inside a softirq handler
 */
enum trace_flag_type {
        TRACE_FLAG_IRQS_OFF = 0x01,
        TRACE_FLAG_IRQS_NOSUPPORT = 0x02,
        TRACE_FLAG_NEED_RESCHED = 0x04,
        TRACE_FLAG_HARDIRQ = 0x08,
        TRACE_FLAG_SOFTIRQ = 0x10,
};
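
/*
 * A rough sketch of how these bits end up in trace_entry.flags (illustrative
 * only; see tracing_generic_entry_update() in trace.c for the authoritative
 * version):
 *
 *      entry->flags =
 *              (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
 *              ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
 *              ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
 *              (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
 */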
#define TRACE_BUF_SIZE 1024

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data: (for example which task started
 * the trace, etc.)
 */
struct trace_array_cpu {
        atomic_t disabled;
        void *buffer_page;      /* ring buffer spare */

        /* these fields get copied into max-trace: */
        unsigned long trace_idx;
        unsigned long overrun;
        unsigned long saved_latency;
        unsigned long critical_start;
        unsigned long critical_end;
        unsigned long critical_sequence;
        unsigned long nice;
        unsigned long policy;
        unsigned long rt_priority;
        cycle_t preempt_timestamp;
        pid_t pid;
        uid_t uid;
        char comm[TASK_COMM_LEN];
};

struct trace_iterator;

/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well:
 */
struct trace_array {
        struct ring_buffer *buffer;
        unsigned long entries;
        int cpu;
        cycle_t time_start;
        struct task_struct *waiter;
        struct trace_array_cpu *data[NR_CPUS];
};
#define FTRACE_CMP_TYPE(var, type) \
        __builtin_types_compatible_p(typeof(var), type *)

#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id)                \
        if (FTRACE_CMP_TYPE(var, etype)) {              \
                var = (typeof(var))(entry);             \
                WARN_ON(id && (entry)->type != id);     \
                break;                                  \
        }

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);

/*
 * The trace_assign_type is a verifier that the entry type is
 * the same as the type being assigned. To add new types simply
 * add a line with the following format:
 *
 * IF_ASSIGN(var, ent, type, id);
 *
 * Where "type" is the trace type that includes the trace_entry
 * as the "ent" item. And "id" is the trace identifier that is
 * used in the trace_type enum.
 *
 * If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)                                     \
        do {                                                            \
                IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);     \
                IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);        \
                IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);   \
                IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
                IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);   \
                IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT); \
                IF_ASSIGN(var, ent, struct special_entry, 0);           \
                IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,          \
                          TRACE_MMIO_RW);                               \
                IF_ASSIGN(var, ent, struct trace_mmiotrace_map,         \
                          TRACE_MMIO_MAP);                              \
                IF_ASSIGN(var, ent, struct trace_boot_call, TRACE_BOOT_CALL);\
                IF_ASSIGN(var, ent, struct trace_boot_ret, TRACE_BOOT_RET);\
                IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
                IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,      \
                          TRACE_GRAPH_ENT);                             \
                IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,      \
                          TRACE_GRAPH_RET);                             \
                IF_ASSIGN(var, ent, struct hw_branch_entry, TRACE_HW_BRANCHES);\
                IF_ASSIGN(var, ent, struct trace_power, TRACE_POWER);   \
                IF_ASSIGN(var, ent, struct kmemtrace_alloc_entry,       \
                          TRACE_KMEM_ALLOC);                            \
                IF_ASSIGN(var, ent, struct kmemtrace_free_entry,        \
                          TRACE_KMEM_FREE);                             \
                IF_ASSIGN(var, ent, struct syscall_trace_enter,         \
                          TRACE_SYSCALL_ENTER);                         \
                IF_ASSIGN(var, ent, struct syscall_trace_exit,          \
                          TRACE_SYSCALL_EXIT);                          \
                __ftrace_bad_type();                                    \
        } while (0)
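
/*
 * A rough usage sketch (illustrative only, not part of this header):
 * an output callback typically narrows the generic entry like this:
 *
 *      struct trace_entry *entry = iter->ent;
 *      struct ftrace_entry *field;
 *
 *      if (entry->type == TRACE_FN) {
 *              trace_assign_type(field, entry);
 *              ... field->ip and field->parent_ip are now usable ...
 *      }
 *
 * trace_assign_type() warns if the entry's type id does not match the
 * struct that the entry is being assigned to.
 */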
/* Return values for print_line callback */
enum print_line_t {
        TRACE_TYPE_PARTIAL_LINE = 0,    /* Retry after flushing the seq */
        TRACE_TYPE_HANDLED      = 1,
        TRACE_TYPE_UNHANDLED    = 2,    /* Relay to other output functions */
        TRACE_TYPE_NO_CONSUME   = 3     /* Handled but ask to not consume */
};

/*
 * An option specific to a tracer. This is a boolean value.
 * The bit is the bit index that sets its value on the
 * flags value in struct tracer_flags.
 */
struct tracer_opt {
        const char *name;       /* Will appear on the trace_options file */
        u32 bit;                /* Mask assigned in val field in tracer_flags */
};
/*
 * The set of specific options for a tracer. Your tracer
 * has to set the initial value of the flags val.
 */
struct tracer_flags {
        u32 val;
        struct tracer_opt *opts;
};

/* Makes it easier to define a tracer opt */
#define TRACER_OPT(s, b)        .name = #s, .bit = b
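
/*
 * A minimal sketch of how a tracer might define its private options
 * (illustrative only; the "foo" names are made up for the example):
 *
 *      #define TRACE_FOO_VERBOSE 0x1
 *
 *      static struct tracer_opt foo_opts[] = {
 *              { TRACER_OPT(foo_verbose, TRACE_FOO_VERBOSE) },
 *              { }     (empty entry terminates the list)
 *      };
 *
 *      static struct tracer_flags foo_flags = {
 *              .val  = 0,      (initial value of the option bits)
 *              .opts = foo_opts,
 *      };
 *
 * The tracer then points its struct tracer ->flags at foo_flags and
 * handles changes in its set_flag() callback.
 */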
/**
 * struct tracer - a specific tracer and its callbacks to interact with debugfs
 * @name: the name chosen to select it on the available_tracers file
 * @init: called when one switches to this tracer (echo name > current_tracer)
 * @reset: called when one switches to another tracer
 * @start: called when tracing is unpaused (echo 1 > tracing_enabled)
 * @stop: called when tracing is paused (echo 0 > tracing_enabled)
 * @open: called when the trace file is opened
 * @pipe_open: called when the trace_pipe file is opened
 * @wait_pipe: override how the user waits for traces on trace_pipe
 * @close: called when the trace file is released
 * @read: override the default read callback on trace_pipe
 * @splice_read: override the default splice_read callback on trace_pipe
 * @selftest: selftest to run on boot (see trace_selftest.c)
 * @print_header: override the first lines that describe your columns
 * @print_line: callback that prints a trace
 * @set_flag: signals one of your private flags changed (trace_options file)
 * @flags: your private flags
 */
struct tracer {
        const char *name;
        int (*init)(struct trace_array *tr);
        void (*reset)(struct trace_array *tr);
        void (*start)(struct trace_array *tr);
        void (*stop)(struct trace_array *tr);
        void (*open)(struct trace_iterator *iter);
        void (*pipe_open)(struct trace_iterator *iter);
        void (*wait_pipe)(struct trace_iterator *iter);
        void (*close)(struct trace_iterator *iter);
        ssize_t (*read)(struct trace_iterator *iter,
                        struct file *filp, char __user *ubuf,
                        size_t cnt, loff_t *ppos);
        ssize_t (*splice_read)(struct trace_iterator *iter,
                               struct file *filp,
                               loff_t *ppos,
                               struct pipe_inode_info *pipe,
                               size_t len,
                               unsigned int flags);
#ifdef CONFIG_FTRACE_STARTUP_TEST
        int (*selftest)(struct tracer *trace,
                        struct trace_array *tr);
#endif
        void (*print_header)(struct seq_file *m);
        enum print_line_t (*print_line)(struct trace_iterator *iter);
        /* If you handled the flag setting, return 0 */
        int (*set_flag)(u32 old_flags, u32 bit, int set);
        struct tracer *next;
        int print_max;
        struct tracer_flags *flags;
        struct tracer_stat *stats;
};
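
/*
 * A minimal sketch of defining and registering a tracer (illustrative only;
 * the "foo" names are made up and not part of this header):
 *
 *      static int foo_init(struct trace_array *tr)
 *      {
 *              return 0;       (start tracing here)
 *      }
 *
 *      static void foo_reset(struct trace_array *tr)
 *      {
 *              (stop tracing, free per-tracer state)
 *      }
 *
 *      static struct tracer foo_tracer __read_mostly = {
 *              .name  = "foo",
 *              .init  = foo_init,
 *              .reset = foo_reset,
 *      };
 *
 * and then, from an initcall:  register_tracer(&foo_tracer);
 */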
struct trace_seq {
        unsigned char buffer[PAGE_SIZE];
        unsigned int len;
        unsigned int readpos;
};

static inline void
trace_seq_init(struct trace_seq *s)
{
        s->len = 0;
        s->readpos = 0;
}

#define TRACE_PIPE_ALL_CPU      -1
/*
 * Trace iterator - used by the printout routines that present trace
 * results to users; these routines may sleep, etc.:
 */
struct trace_iterator {
        struct trace_array *tr;
        struct tracer *trace;
        void *private;
        int cpu_file;
        struct mutex mutex;
        struct ring_buffer_iter *buffer_iter[NR_CPUS];

        /* The below is zeroed out in pipe_read */
        struct trace_seq seq;
        struct trace_entry *ent;
        int cpu;
        u64 ts;

        unsigned long iter_flags;
        loff_t pos;
        long idx;

        cpumask_var_t started;
};
int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
void trace_wake_up(void);
void tracing_reset(struct trace_array *tr, int cpu);
void tracing_reset_online_cpus(struct trace_array *tr);
int tracing_open_generic(struct inode *inode, struct file *filp);
struct dentry *tracing_init_dentry(void);
void init_tracer_sysprof_debugfs(struct dentry *d_tracer);

struct ring_buffer_event;

struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
                                                    unsigned char type,
                                                    unsigned long len,
                                                    unsigned long flags,
                                                    int pc);
void trace_buffer_unlock_commit(struct trace_array *tr,
                                struct ring_buffer_event *event,
                                unsigned long flags, int pc);

struct ring_buffer_event *
trace_current_buffer_lock_reserve(unsigned char type, unsigned long len,
                                  unsigned long flags, int pc);
void trace_current_buffer_unlock_commit(struct ring_buffer_event *event,
                                        unsigned long flags, int pc);
void trace_nowake_buffer_unlock_commit(struct ring_buffer_event *event,
                                       unsigned long flags, int pc);

struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
                                            struct trace_array_cpu *data);

struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
                                          int *ent_cpu, u64 *ent_ts);

void tracing_generic_entry_update(struct trace_entry *entry,
                                  unsigned long flags,
                                  int pc);

void default_wait_pipe(struct trace_iterator *iter);
void poll_wait_pipe(struct trace_iterator *iter);

void ftrace(struct trace_array *tr,
            struct trace_array_cpu *data,
            unsigned long ip,
            unsigned long parent_ip,
            unsigned long flags, int pc);
void tracing_sched_switch_trace(struct trace_array *tr,
                                struct task_struct *prev,
                                struct task_struct *next,
                                unsigned long flags, int pc);
void tracing_record_cmdline(struct task_struct *tsk);

void tracing_sched_wakeup_trace(struct trace_array *tr,
                                struct task_struct *wakee,
                                struct task_struct *cur,
                                unsigned long flags, int pc);
void trace_special(struct trace_array *tr,
                   struct trace_array_cpu *data,
                   unsigned long arg1,
                   unsigned long arg2,
                   unsigned long arg3, int pc);
void trace_function(struct trace_array *tr,
                    unsigned long ip,
                    unsigned long parent_ip,
                    unsigned long flags, int pc);

void trace_graph_return(struct ftrace_graph_ret *trace);
int trace_graph_entry(struct ftrace_graph_ent *trace);

void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
void tracing_sched_switch_assign_trace(struct trace_array *tr);
void tracing_stop_sched_switch_record(void);
void tracing_start_sched_switch_record(void);
int register_tracer(struct tracer *type);
void unregister_tracer(struct tracer *type);

extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_max_latency;
extern unsigned long tracing_thresh;

void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
                          struct task_struct *tsk, int cpu);

void __trace_stack(struct trace_array *tr,
                   unsigned long flags,
                   int skip, int pc);

extern cycle_t ftrace_now(int cpu);

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
typedef void
(*tracer_switch_func_t)(void *private,
                        void *__rq,
                        struct task_struct *prev,
                        struct task_struct *next);

struct tracer_switch_ops {
        tracer_switch_func_t func;
        void *private;
        struct tracer_switch_ops *next;
};
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

extern void trace_find_cmdline(int pid, char comm[]);
#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#endif

#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
                                           struct trace_array *tr);
extern int trace_selftest_startup_function_graph(struct tracer *trace,
                                                 struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
                                          struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
                                             struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
                                                 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
                                         struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
                                      struct trace_array *tr);
extern int trace_selftest_startup_sched_switch(struct tracer *trace,
                                               struct trace_array *tr);
extern int trace_selftest_startup_sysprof(struct tracer *trace,
                                          struct trace_array *tr);
extern int trace_selftest_startup_branch(struct tracer *trace,
                                         struct trace_array *tr);
#endif /* CONFIG_FTRACE_STARTUP_TEST */

extern void *head_page(struct trace_array_cpu *data);
extern unsigned long long ns2usecs(cycle_t nsec);
extern int
trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_vprintk(unsigned long ip, const char *fmt, va_list args);

extern unsigned long trace_flags;

/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern enum print_line_t print_graph_function(struct trace_iterator *iter);

#ifdef CONFIG_DYNAMIC_FTRACE
/* TODO: make this variable */
#define FTRACE_GRAPH_MAX_FUNCS          32
extern int ftrace_graph_count;
extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];
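
/*
 * ftrace_graph_addr - should this function be graph traced?
 *
 * Descriptive note (added): returns 1 when no functions are listed in the
 * graph filter (so every function is traced), when the current task is
 * already being graph traced, or when @addr is one of the listed functions;
 * otherwise returns 0.
 */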
static inline int ftrace_graph_addr(unsigned long addr)
{
        int i;

        if (!ftrace_graph_count || test_tsk_trace_graph(current))
                return 1;

        for (i = 0; i < ftrace_graph_count; i++) {
                if (addr == ftrace_graph_funcs[i])
                        return 1;
        }

        return 0;
}
#else
static inline int ftrace_trace_addr(unsigned long addr)
{
        return 1;
}
static inline int ftrace_graph_addr(unsigned long addr)
{
        return 1;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#else /* CONFIG_FUNCTION_GRAPH_TRACER */
static inline enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
        return TRACE_TYPE_UNHANDLED;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

extern struct pid *ftrace_pid_trace;

static inline int ftrace_trace_task(struct task_struct *task)
{
        if (!ftrace_pid_trace)
                return 1;

        return test_tsk_trace_trace(task);
}
/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that control the output.
 *
 * NOTE: These bits must match the trace_options array in
 *       trace.c.
 */
enum trace_iterator_flags {
        TRACE_ITER_PRINT_PARENT         = 0x01,
        TRACE_ITER_SYM_OFFSET           = 0x02,
        TRACE_ITER_SYM_ADDR             = 0x04,
        TRACE_ITER_VERBOSE              = 0x08,
        TRACE_ITER_RAW                  = 0x10,
        TRACE_ITER_HEX                  = 0x20,
        TRACE_ITER_BIN                  = 0x40,
        TRACE_ITER_BLOCK                = 0x80,
        TRACE_ITER_STACKTRACE           = 0x100,
        TRACE_ITER_SCHED_TREE           = 0x200,
        TRACE_ITER_PRINTK               = 0x400,
        TRACE_ITER_PREEMPTONLY          = 0x800,
        TRACE_ITER_BRANCH               = 0x1000,
        TRACE_ITER_ANNOTATE             = 0x2000,
        TRACE_ITER_USERSTACKTRACE       = 0x4000,
        TRACE_ITER_SYM_USEROBJ          = 0x8000,
        TRACE_ITER_PRINTK_MSGONLY       = 0x10000,
        TRACE_ITER_CONTEXT_INFO         = 0x20000,      /* Print pid/cpu/time */
        TRACE_ITER_LATENCY_FMT          = 0x40000,
        TRACE_ITER_GLOBAL_CLK           = 0x80000,
        TRACE_ITER_SLEEP_TIME           = 0x100000,
};

/*
 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 * control the output of kernel symbols.
 */
#define TRACE_ITER_SYM_MASK \
        (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

extern struct tracer nop_trace;
/**
 * ftrace_preempt_disable - disable preemption scheduler safe
 *
 * When tracing can happen inside the scheduler, there exist
 * cases where the tracing might happen before the need_resched
 * flag is checked. If this happens and the tracer calls
 * preempt_enable (after a disable), a schedule might take place
 * causing an infinite recursion.
 *
 * To prevent this, we read the need_resched flag before
 * disabling preemption. When we want to enable preemption we
 * check the flag; if it is set, we call preempt_enable_no_resched.
 * Otherwise, we call preempt_enable.
 *
 * The rationale for doing the above is that if need_resched is set
 * and we have yet to reschedule, we are either in an atomic location
 * (where we do not need to check for scheduling) or we are inside
 * the scheduler and do not want to resched.
 */
static inline int ftrace_preempt_disable(void)
{
        int resched;

        resched = need_resched();
        preempt_disable_notrace();

        return resched;
}
/**
 * ftrace_preempt_enable - enable preemption scheduler safe
 * @resched: the return value from ftrace_preempt_disable
 *
 * This is a scheduler safe way to enable preemption without missing
 * any preemption checks. The matching ftrace_preempt_disable saved the
 * state of preemption. If resched is set, then we were either inside
 * an atomic section or inside the scheduler (we would have already
 * scheduled otherwise). In this case, we do not want to call the normal
 * preempt_enable, but preempt_enable_no_resched instead.
 */
static inline void ftrace_preempt_enable(int resched)
{
        if (resched)
                preempt_enable_no_resched_notrace();
        else
                preempt_enable_notrace();
}
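
/*
 * A rough usage sketch (illustrative only, not part of this header):
 * trace hooks that may run inside the scheduler pair the two helpers
 * like this:
 *
 *      int resched;
 *
 *      resched = ftrace_preempt_disable();
 *      ... record the trace entry with preemption disabled ...
 *      ftrace_preempt_enable(resched);
 *
 * The saved value decides whether the no_resched variant of
 * preempt_enable is used on the way out.
 */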
#ifdef CONFIG_BRANCH_TRACER
extern int enable_branch_tracing(struct trace_array *tr);
extern void disable_branch_tracing(void);
static inline int trace_branch_enable(struct trace_array *tr)
{
        if (trace_flags & TRACE_ITER_BRANCH)
                return enable_branch_tracing(tr);
        return 0;
}
static inline void trace_branch_disable(void)
{
        /* due to races, always disable */
        disable_branch_tracing();
}
#else
static inline int trace_branch_enable(struct trace_array *tr)
{
        return 0;
}
static inline void trace_branch_disable(void)
{
}
#endif /* CONFIG_BRANCH_TRACER */
/* set ring buffers to default size if not already done so */
int tracing_update_buffers(void);

/* trace event type bit fields, not numeric */
enum {
        TRACE_EVENT_TYPE_PRINTF         = 1,
        TRACE_EVENT_TYPE_RAW            = 2,
};

struct ftrace_event_field {
        struct list_head link;
        char *name;
        char *type;
        int offset;
        int size;
};

struct ftrace_event_call {
        char *name;
        char *system;
        struct dentry *dir;
        int enabled;
        int (*regfunc)(void);
        void (*unregfunc)(void);
        int id;
        int (*raw_init)(void);
        int (*show_format)(struct trace_seq *s);
        int (*define_fields)(void);
        struct list_head fields;
        struct filter_pred **preds;

#ifdef CONFIG_EVENT_PROFILE
        atomic_t profile_count;
        int (*profile_enable)(struct ftrace_event_call *);
        void (*profile_disable)(struct ftrace_event_call *);
#endif
};
struct event_subsystem {
        struct list_head list;
        const char *name;
        struct dentry *entry;
        struct filter_pred **preds;
};

#define events_for_each(event)                                          \
        for (event = __start_ftrace_events;                             \
             (unsigned long)event < (unsigned long)__stop_ftrace_events; \
             event++)

#define MAX_FILTER_PRED 8

struct filter_pred;

typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);

struct filter_pred {
        filter_pred_fn_t fn;
        u64 val;
        char *str_val;
        int str_len;
        char *field_name;
        int offset;
        int not;
        int or;
        int compound;
        int clear;
};

int trace_define_field(struct ftrace_event_call *call, char *type,
                       char *name, int offset, int size);
extern void filter_free_pred(struct filter_pred *pred);
extern void filter_print_preds(struct filter_pred **preds,
                               struct trace_seq *s);
extern int filter_parse(char **pbuf, struct filter_pred *pred);
extern int filter_add_pred(struct ftrace_event_call *call,
                           struct filter_pred *pred);
extern void filter_free_preds(struct ftrace_event_call *call);
extern int filter_match_preds(struct ftrace_event_call *call, void *rec);
extern void filter_free_subsystem_preds(struct event_subsystem *system);
extern int filter_add_subsystem_pred(struct event_subsystem *system,
                                     struct filter_pred *pred);

void event_trace_printk(unsigned long ip, const char *fmt, ...);
extern struct ftrace_event_call __start_ftrace_events[];
extern struct ftrace_event_call __stop_ftrace_events[];

#define for_each_event(event)                                           \
        for (event = __start_ftrace_events;                             \
             (unsigned long)event < (unsigned long)__stop_ftrace_events; \
             event++)
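
/*
 * A rough usage sketch (illustrative only): walking every compiled-in event
 * between the __start_ftrace_events/__stop_ftrace_events section markers:
 *
 *      struct ftrace_event_call *call;
 *
 *      for_each_event(call) {
 *              if (call->raw_init)
 *                      call->raw_init();
 *      }
 */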
extern const char *__start___trace_bprintk_fmt[];
extern const char *__stop___trace_bprintk_fmt[];

/*
 * The double __builtin_constant_p is because gcc will give us an error
 * if we try to assign fmt to the static variable when it is not a
 * constant, even though the outer if statement would be optimized out.
 */
#define event_trace_printk(ip, fmt, args...)                            \
do {                                                                    \
        __trace_printk_check_format(fmt, ##args);                       \
        tracing_record_cmdline(current);                                \
        if (__builtin_constant_p(fmt)) {                                \
                static const char *trace_printk_fmt                     \
                __attribute__((section("__trace_printk_fmt"))) =        \
                        __builtin_constant_p(fmt) ? fmt : NULL;         \
                                                                        \
                __trace_bprintk(ip, trace_printk_fmt, ##args);          \
        } else                                                          \
                __trace_printk(ip, fmt, ##args);                        \
} while (0)

#endif /* _LINUX_KERNEL_TRACE_H */
  777. #endif /* _LINUX_KERNEL_TRACE_H */