trace.h

#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/fs.h>
#include <asm/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/ftrace.h>
#include <trace/boot.h>
#include <trace/kmemtrace.h>
#include <trace/power.h>

enum trace_type {
        __TRACE_FIRST_TYPE = 0,

        TRACE_FN,
        TRACE_CTX,
        TRACE_WAKE,
        TRACE_STACK,
        TRACE_PRINT,
        TRACE_SPECIAL,
        TRACE_MMIO_RW,
        TRACE_MMIO_MAP,
        TRACE_BRANCH,
        TRACE_BOOT_CALL,
        TRACE_BOOT_RET,
        TRACE_GRAPH_RET,
        TRACE_GRAPH_ENT,
        TRACE_USER_STACK,
        TRACE_HW_BRANCHES,
        TRACE_SYSCALL_ENTER,
        TRACE_SYSCALL_EXIT,
        TRACE_KMEM_ALLOC,
        TRACE_KMEM_FREE,
        TRACE_POWER,
        TRACE_BLK,

        __TRACE_LAST_TYPE,
};

/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
        unsigned char type;
        unsigned char flags;
        unsigned char preempt_count;
        int pid;
        int tgid;
};

/*
 * Function trace entry - function address and parent function address:
 */
struct ftrace_entry {
        struct trace_entry ent;
        unsigned long ip;
        unsigned long parent_ip;
};

/* Function call entry */
struct ftrace_graph_ent_entry {
        struct trace_entry ent;
        struct ftrace_graph_ent graph_ent;
};

/* Function return entry */
struct ftrace_graph_ret_entry {
        struct trace_entry ent;
        struct ftrace_graph_ret ret;
};

extern struct tracer boot_tracer;

/*
 * Context switch trace entry - which task (and prio) we switched from/to:
 */
struct ctx_switch_entry {
        struct trace_entry ent;
        unsigned int prev_pid;
        unsigned char prev_prio;
        unsigned char prev_state;
        unsigned int next_pid;
        unsigned char next_prio;
        unsigned char next_state;
        unsigned int next_cpu;
};

/*
 * Special (free-form) trace entry:
 */
struct special_entry {
        struct trace_entry ent;
        unsigned long arg1;
        unsigned long arg2;
        unsigned long arg3;
};

/*
 * Stack-trace entry:
 */
#define FTRACE_STACK_ENTRIES 8

struct stack_entry {
        struct trace_entry ent;
        unsigned long caller[FTRACE_STACK_ENTRIES];
};

struct userstack_entry {
        struct trace_entry ent;
        unsigned long caller[FTRACE_STACK_ENTRIES];
};

/*
 * trace_printk entry:
 */
struct print_entry {
        struct trace_entry ent;
        unsigned long ip;
        int depth;
        const char *fmt;
        u32 buf[];
};

#define TRACE_OLD_SIZE 88

struct trace_field_cont {
        unsigned char type;
        /* Temporary till we get rid of this completely */
        char buf[TRACE_OLD_SIZE - 1];
};

struct trace_mmiotrace_rw {
        struct trace_entry ent;
        struct mmiotrace_rw rw;
};

struct trace_mmiotrace_map {
        struct trace_entry ent;
        struct mmiotrace_map map;
};

struct trace_boot_call {
        struct trace_entry ent;
        struct boot_trace_call boot_call;
};

struct trace_boot_ret {
        struct trace_entry ent;
        struct boot_trace_ret boot_ret;
};

#define TRACE_FUNC_SIZE 30
#define TRACE_FILE_SIZE 20
struct trace_branch {
        struct trace_entry ent;
        unsigned line;
        char func[TRACE_FUNC_SIZE+1];
        char file[TRACE_FILE_SIZE+1];
        char correct;
};

struct hw_branch_entry {
        struct trace_entry ent;
        u64 from;
        u64 to;
};

struct trace_power {
        struct trace_entry ent;
        struct power_trace state_data;
};

struct kmemtrace_alloc_entry {
        struct trace_entry ent;
        enum kmemtrace_type_id type_id;
        unsigned long call_site;
        const void *ptr;
        size_t bytes_req;
        size_t bytes_alloc;
        gfp_t gfp_flags;
        int node;
};

struct kmemtrace_free_entry {
        struct trace_entry ent;
        enum kmemtrace_type_id type_id;
        unsigned long call_site;
        const void *ptr;
};

struct syscall_trace_enter {
        struct trace_entry ent;
        int nr;
        unsigned long args[];
};

struct syscall_trace_exit {
        struct trace_entry ent;
        int nr;
        unsigned long ret;
};

/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF       - interrupts were disabled
 *  IRQS_NOSUPPORT - arch does not support irqs_disabled_flags
 *  NEED_RESCHED   - reschedule is requested
 *  HARDIRQ        - inside an interrupt handler
 *  SOFTIRQ        - inside a softirq handler
 */
enum trace_flag_type {
        TRACE_FLAG_IRQS_OFF = 0x01,
        TRACE_FLAG_IRQS_NOSUPPORT = 0x02,
        TRACE_FLAG_NEED_RESCHED = 0x04,
        TRACE_FLAG_HARDIRQ = 0x08,
        TRACE_FLAG_SOFTIRQ = 0x10,
};

#define TRACE_BUF_SIZE 1024

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data: (for example which task started
 * the trace, etc.)
 */
struct trace_array_cpu {
        atomic_t disabled;
        void *buffer_page; /* ring buffer spare */

        /* these fields get copied into max-trace: */
        unsigned long trace_idx;
        unsigned long overrun;
        unsigned long saved_latency;
        unsigned long critical_start;
        unsigned long critical_end;
        unsigned long critical_sequence;
        unsigned long nice;
        unsigned long policy;
        unsigned long rt_priority;
        cycle_t preempt_timestamp;
        pid_t pid;
        uid_t uid;
        char comm[TASK_COMM_LEN];
};

struct trace_iterator;

/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well:
 */
struct trace_array {
        struct ring_buffer *buffer;
        unsigned long entries;
        int cpu;
        cycle_t time_start;
        struct task_struct *waiter;
        struct trace_array_cpu *data[NR_CPUS];
};

#define FTRACE_CMP_TYPE(var, type) \
        __builtin_types_compatible_p(typeof(var), type *)

#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id)                \
        if (FTRACE_CMP_TYPE(var, etype)) {              \
                var = (typeof(var))(entry);             \
                WARN_ON(id && (entry)->type != id);     \
                break;                                  \
        }

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);

/*
 * The trace_assign_type is a verifier that the entry type is
 * the same as the type being assigned. To add new types simply
 * add a line with the following format:
 *
 * IF_ASSIGN(var, ent, type, id);
 *
 * Where "type" is the trace type that includes the trace_entry
 * as the "ent" item. And "id" is the trace identifier that is
 * used in the trace_type enum.
 *
 * If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)                                     \
        do {                                                            \
                IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);     \
                IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);        \
                IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);   \
                IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK); \
                IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);   \
                IF_ASSIGN(var, ent, struct special_entry, 0);           \
                IF_ASSIGN(var, ent, struct trace_mmiotrace_rw, TRACE_MMIO_RW); \
                IF_ASSIGN(var, ent, struct trace_mmiotrace_map, TRACE_MMIO_MAP); \
                IF_ASSIGN(var, ent, struct trace_boot_call, TRACE_BOOT_CALL); \
                IF_ASSIGN(var, ent, struct trace_boot_ret, TRACE_BOOT_RET); \
                IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
                IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry, TRACE_GRAPH_ENT); \
                IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry, TRACE_GRAPH_RET); \
                IF_ASSIGN(var, ent, struct hw_branch_entry, TRACE_HW_BRANCHES); \
                IF_ASSIGN(var, ent, struct trace_power, TRACE_POWER);   \
                IF_ASSIGN(var, ent, struct kmemtrace_alloc_entry, TRACE_KMEM_ALLOC); \
                IF_ASSIGN(var, ent, struct kmemtrace_free_entry, TRACE_KMEM_FREE); \
                IF_ASSIGN(var, ent, struct syscall_trace_enter, TRACE_SYSCALL_ENTER); \
                IF_ASSIGN(var, ent, struct syscall_trace_exit, TRACE_SYSCALL_EXIT); \
                __ftrace_bad_type();                                    \
        } while (0)

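/*
 * Illustrative sketch (not part of the original header): a typical use of
 * trace_assign_type() inside a tracer's print_line callback. The function
 * name "example_print_line" is hypothetical, and trace_seq_printf() is
 * assumed to be available from the output helpers rather than this header.
 *
 *      static enum print_line_t example_print_line(struct trace_iterator *iter)
 *      {
 *              struct trace_entry *entry = iter->ent;
 *              struct ftrace_entry *field;
 *
 *              if (entry->type != TRACE_FN)
 *                      return TRACE_TYPE_UNHANDLED;
 *
 *              trace_assign_type(field, entry);
 *              trace_seq_printf(&iter->seq, "%lx <- %lx\n",
 *                               field->ip, field->parent_ip);
 *              return TRACE_TYPE_HANDLED;
 *      }
 */
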
/* Return values for print_line callback */
enum print_line_t {
        TRACE_TYPE_PARTIAL_LINE = 0,    /* Retry after flushing the seq */
        TRACE_TYPE_HANDLED = 1,
        TRACE_TYPE_UNHANDLED = 2,       /* Relay to other output functions */
        TRACE_TYPE_NO_CONSUME = 3       /* Handled but ask to not consume */
};

/*
 * An option specific to a tracer. This is a boolean value.
 * The bit is the bit index that sets its value on the
 * flags value in struct tracer_flags.
 */
struct tracer_opt {
        const char *name;       /* Will appear on the trace_options file */
        u32 bit;                /* Mask assigned in val field in tracer_flags */
};

/*
 * The set of specific options for a tracer. Your tracer
 * has to set the initial value of the flags val.
 */
struct tracer_flags {
        u32 val;
        struct tracer_opt *opts;
};

/* Makes it easier to define a tracer opt */
#define TRACER_OPT(s, b) .name = #s, .bit = b

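/*
 * Illustrative sketch (not part of the original header): declaring one
 * private option with TRACER_OPT(). The names "example_opts",
 * "example_flags" and EXAMPLE_OPT_VERBOSE are hypothetical; the opts
 * array is terminated by an empty entry.
 *
 *      #define EXAMPLE_OPT_VERBOSE 0x1
 *
 *      static struct tracer_opt example_opts[] = {
 *              { TRACER_OPT(example-verbose, EXAMPLE_OPT_VERBOSE) },
 *              { }
 *      };
 *
 *      static struct tracer_flags example_flags = {
 *              .val = 0,
 *              .opts = example_opts,
 *      };
 */
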
/**
 * struct tracer - a specific tracer and its callbacks to interact with debugfs
 * @name: the name chosen to select it on the available_tracers file
 * @init: called when one switches to this tracer (echo name > current_tracer)
 * @reset: called when one switches to another tracer
 * @start: called when tracing is unpaused (echo 1 > tracing_enabled)
 * @stop: called when tracing is paused (echo 0 > tracing_enabled)
 * @open: called when the trace file is opened
 * @pipe_open: called when the trace_pipe file is opened
 * @wait_pipe: override how the user waits for traces on trace_pipe
 * @close: called when the trace file is released
 * @read: override the default read callback on trace_pipe
 * @splice_read: override the default splice_read callback on trace_pipe
 * @selftest: selftest to run on boot (see trace_selftest.c)
 * @print_header: override the first lines that describe your columns
 * @print_line: callback that prints a trace
 * @set_flag: signals one of your private flags changed (trace_options file)
 * @flags: your private flags
 */
struct tracer {
        const char *name;
        int (*init)(struct trace_array *tr);
        void (*reset)(struct trace_array *tr);
        void (*start)(struct trace_array *tr);
        void (*stop)(struct trace_array *tr);
        void (*open)(struct trace_iterator *iter);
        void (*pipe_open)(struct trace_iterator *iter);
        void (*wait_pipe)(struct trace_iterator *iter);
        void (*close)(struct trace_iterator *iter);
        ssize_t (*read)(struct trace_iterator *iter,
                        struct file *filp, char __user *ubuf,
                        size_t cnt, loff_t *ppos);
        ssize_t (*splice_read)(struct trace_iterator *iter,
                               struct file *filp,
                               loff_t *ppos,
                               struct pipe_inode_info *pipe,
                               size_t len,
                               unsigned int flags);
#ifdef CONFIG_FTRACE_STARTUP_TEST
        int (*selftest)(struct tracer *trace,
                        struct trace_array *tr);
#endif
        void (*print_header)(struct seq_file *m);
        enum print_line_t (*print_line)(struct trace_iterator *iter);
        /* If you handled the flag setting, return 0 */
        int (*set_flag)(u32 old_flags, u32 bit, int set);
        struct tracer *next;
        int print_max;
        struct tracer_flags *flags;
        struct tracer_stat *stats;
};

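/*
 * Illustrative sketch (not part of the original header): a minimal tracer.
 * Typically at least .name and .init are filled in; unused callbacks may be
 * left NULL. The names "example_tracer", "example_trace_init" and
 * "example_trace_reset" are hypothetical.
 *
 *      static int example_trace_init(struct trace_array *tr)
 *      {
 *              tracing_reset_online_cpus(tr);
 *              return 0;
 *      }
 *
 *      static void example_trace_reset(struct trace_array *tr)
 *      {
 *      }
 *
 *      static struct tracer example_tracer __read_mostly = {
 *              .name   = "example",
 *              .init   = example_trace_init,
 *              .reset  = example_trace_reset,
 *      };
 *
 * The tracer then shows up in available_tracers once register_tracer()
 * (declared below) is called on &example_tracer from an __init function.
 */
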
struct trace_seq {
        unsigned char buffer[PAGE_SIZE];
        unsigned int len;
        unsigned int readpos;
};

static inline void
trace_seq_init(struct trace_seq *s)
{
        s->len = 0;
        s->readpos = 0;
}

#define TRACE_PIPE_ALL_CPU -1

/*
 * Trace iterator - used by the printout routines that present trace
 * results to users; these routines might sleep, etc:
 */
struct trace_iterator {
        struct trace_array *tr;
        struct tracer *trace;
        void *private;
        int cpu_file;
        struct mutex mutex;
        struct ring_buffer_iter *buffer_iter[NR_CPUS];

        /* The below is zeroed out in pipe_read */
        struct trace_seq seq;
        struct trace_entry *ent;
        int cpu;
        u64 ts;

        unsigned long iter_flags;
        loff_t pos;
        long idx;

        cpumask_var_t started;
};

int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
void trace_wake_up(void);
void tracing_reset(struct trace_array *tr, int cpu);
void tracing_reset_online_cpus(struct trace_array *tr);
int tracing_open_generic(struct inode *inode, struct file *filp);
struct dentry *tracing_init_dentry(void);
void init_tracer_sysprof_debugfs(struct dentry *d_tracer);

struct ring_buffer_event;

struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
                                                    unsigned char type,
                                                    unsigned long len,
                                                    unsigned long flags,
                                                    int pc);
void trace_buffer_unlock_commit(struct trace_array *tr,
                                struct ring_buffer_event *event,
                                unsigned long flags, int pc);

struct ring_buffer_event *
trace_current_buffer_lock_reserve(unsigned char type, unsigned long len,
                                  unsigned long flags, int pc);
void trace_current_buffer_unlock_commit(struct ring_buffer_event *event,
                                        unsigned long flags, int pc);

struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
                                            struct trace_array_cpu *data);

struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
                                          int *ent_cpu, u64 *ent_ts);

void tracing_generic_entry_update(struct trace_entry *entry,
                                  unsigned long flags,
                                  int pc);

void default_wait_pipe(struct trace_iterator *iter);
void poll_wait_pipe(struct trace_iterator *iter);

void ftrace(struct trace_array *tr,
            struct trace_array_cpu *data,
            unsigned long ip,
            unsigned long parent_ip,
            unsigned long flags, int pc);

void tracing_sched_switch_trace(struct trace_array *tr,
                                struct task_struct *prev,
                                struct task_struct *next,
                                unsigned long flags, int pc);
void tracing_record_cmdline(struct task_struct *tsk);

void tracing_sched_wakeup_trace(struct trace_array *tr,
                                struct task_struct *wakee,
                                struct task_struct *cur,
                                unsigned long flags, int pc);
void trace_special(struct trace_array *tr,
                   struct trace_array_cpu *data,
                   unsigned long arg1,
                   unsigned long arg2,
                   unsigned long arg3, int pc);
void trace_function(struct trace_array *tr,
                    unsigned long ip,
                    unsigned long parent_ip,
                    unsigned long flags, int pc);

void trace_graph_return(struct ftrace_graph_ret *trace);
int trace_graph_entry(struct ftrace_graph_ent *trace);

void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
void tracing_sched_switch_assign_trace(struct trace_array *tr);
void tracing_stop_sched_switch_record(void);
void tracing_start_sched_switch_record(void);
int register_tracer(struct tracer *type);
void unregister_tracer(struct tracer *type);

extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_max_latency;
extern unsigned long tracing_thresh;

void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
                          struct task_struct *tsk, int cpu);

void __trace_stack(struct trace_array *tr,
                   unsigned long flags,
                   int skip, int pc);

extern cycle_t ftrace_now(int cpu);

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
typedef void
(*tracer_switch_func_t)(void *private,
                        void *__rq,
                        struct task_struct *prev,
                        struct task_struct *next);

struct tracer_switch_ops {
        tracer_switch_func_t func;
        void *private;
        struct tracer_switch_ops *next;
};
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

extern char *trace_find_cmdline(int pid);

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#endif

#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
                                           struct trace_array *tr);
extern int trace_selftest_startup_function_graph(struct tracer *trace,
                                                 struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
                                          struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
                                             struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
                                                 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
                                         struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
                                      struct trace_array *tr);
extern int trace_selftest_startup_sched_switch(struct tracer *trace,
                                               struct trace_array *tr);
extern int trace_selftest_startup_sysprof(struct tracer *trace,
                                          struct trace_array *tr);
extern int trace_selftest_startup_branch(struct tracer *trace,
                                         struct trace_array *tr);
#endif /* CONFIG_FTRACE_STARTUP_TEST */

extern void *head_page(struct trace_array_cpu *data);
extern long ns2usecs(cycle_t nsec);
extern int
trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args);
extern unsigned long trace_flags;

/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern enum print_line_t print_graph_function(struct trace_iterator *iter);

#ifdef CONFIG_DYNAMIC_FTRACE
/* TODO: make this variable */
#define FTRACE_GRAPH_MAX_FUNCS 32
extern int ftrace_graph_count;
extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];

static inline int ftrace_graph_addr(unsigned long addr)
{
        int i;

        if (!ftrace_graph_count || test_tsk_trace_graph(current))
                return 1;

        for (i = 0; i < ftrace_graph_count; i++) {
                if (addr == ftrace_graph_funcs[i])
                        return 1;
        }

        return 0;
}
#else
static inline int ftrace_trace_addr(unsigned long addr)
{
        return 1;
}
static inline int ftrace_graph_addr(unsigned long addr)
{
        return 1;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#else /* CONFIG_FUNCTION_GRAPH_TRACER */
static inline enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
        return TRACE_TYPE_UNHANDLED;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

extern struct pid *ftrace_pid_trace;

static inline int ftrace_trace_task(struct task_struct *task)
{
        if (!ftrace_pid_trace)
                return 1;

        return test_tsk_trace_trace(task);
}

/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that control the output.
 *
 * NOTE: These bits must match the trace_options array in
 *       trace.c.
 */
enum trace_iterator_flags {
        TRACE_ITER_PRINT_PARENT = 0x01,
        TRACE_ITER_SYM_OFFSET = 0x02,
        TRACE_ITER_SYM_ADDR = 0x04,
        TRACE_ITER_VERBOSE = 0x08,
        TRACE_ITER_RAW = 0x10,
        TRACE_ITER_HEX = 0x20,
        TRACE_ITER_BIN = 0x40,
        TRACE_ITER_BLOCK = 0x80,
        TRACE_ITER_STACKTRACE = 0x100,
        TRACE_ITER_SCHED_TREE = 0x200,
        TRACE_ITER_PRINTK = 0x400,
        TRACE_ITER_PREEMPTONLY = 0x800,
        TRACE_ITER_BRANCH = 0x1000,
        TRACE_ITER_ANNOTATE = 0x2000,
        TRACE_ITER_USERSTACKTRACE = 0x4000,
        TRACE_ITER_SYM_USEROBJ = 0x8000,
        TRACE_ITER_PRINTK_MSGONLY = 0x10000,
        TRACE_ITER_CONTEXT_INFO = 0x20000, /* Print pid/cpu/time */
        TRACE_ITER_LATENCY_FMT = 0x40000,
};

/*
 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 * control the output of kernel symbols.
 */
#define TRACE_ITER_SYM_MASK \
        (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

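/*
 * Illustrative sketch (not part of the original header): output code tests
 * these bits against the global trace_flags declared above, for example:
 *
 *      int verbose = !!(trace_flags & TRACE_ITER_VERBOSE);
 *      int sym_addr = !!(trace_flags & TRACE_ITER_SYM_ADDR);
 */
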
extern struct tracer nop_trace;

/**
 * ftrace_preempt_disable - disable preemption scheduler safe
 *
 * When tracing can happen inside the scheduler, there exist
 * cases where the tracing might happen before the need_resched
 * flag is checked. If this happens and the tracer calls
 * preempt_enable (after a disable), a schedule might take place
 * causing an infinite recursion.
 *
 * To prevent this, we read the need_resched flag before
 * disabling preemption. When we want to enable preemption we
 * check the flag, if it is set, then we call preempt_enable_no_resched.
 * Otherwise, we call preempt_enable.
 *
 * The rationale for doing the above is that if need_resched is set
 * and we have yet to reschedule, we are either in an atomic location
 * (where we do not need to check for scheduling) or we are inside
 * the scheduler and do not want to resched.
 */
static inline int ftrace_preempt_disable(void)
{
        int resched;

        resched = need_resched();
        preempt_disable_notrace();
        return resched;
}

/**
 * ftrace_preempt_enable - enable preemption scheduler safe
 * @resched: the return value from ftrace_preempt_disable
 *
 * This is a scheduler safe way to enable preemption and not miss
 * any preemption checks. The disable call saved the state of preemption
 * (whether need_resched was set). If resched is set, then we are either
 * inside an atomic or are inside the scheduler (we would have already
 * scheduled otherwise). In this case, we do not want to call normal
 * preempt_enable, but preempt_enable_no_resched instead.
 */
static inline void ftrace_preempt_enable(int resched)
{
        if (resched)
                preempt_enable_no_resched_notrace();
        else
                preempt_enable_notrace();
}

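/*
 * Illustrative sketch (not part of the original header): the intended usage
 * pattern for the pair above around a tracing fast path. The names
 * "example_trace_hook" and "record_the_event" are hypothetical placeholders.
 *
 *      static void example_trace_hook(unsigned long ip, unsigned long parent_ip)
 *      {
 *              int resched;
 *
 *              resched = ftrace_preempt_disable();
 *              record_the_event(ip, parent_ip);
 *              ftrace_preempt_enable(resched);
 *      }
 */
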
#ifdef CONFIG_BRANCH_TRACER
extern int enable_branch_tracing(struct trace_array *tr);
extern void disable_branch_tracing(void);
static inline int trace_branch_enable(struct trace_array *tr)
{
        if (trace_flags & TRACE_ITER_BRANCH)
                return enable_branch_tracing(tr);
        return 0;
}
static inline void trace_branch_disable(void)
{
        /* due to races, always disable */
        disable_branch_tracing();
}
#else
static inline int trace_branch_enable(struct trace_array *tr)
{
        return 0;
}
static inline void trace_branch_disable(void)
{
}
#endif /* CONFIG_BRANCH_TRACER */

/* trace event type bit fields, not numeric */
enum {
        TRACE_EVENT_TYPE_PRINTF = 1,
        TRACE_EVENT_TYPE_RAW = 2,
};

struct ftrace_event_call {
        char *name;
        char *system;
        struct dentry *dir;
        int enabled;
        int (*regfunc)(void);
        void (*unregfunc)(void);
        int id;
        int (*raw_init)(void);
        int (*show_format)(struct trace_seq *s);
};

void event_trace_printk(unsigned long ip, const char *fmt, ...);
extern struct ftrace_event_call __start_ftrace_events[];
extern struct ftrace_event_call __stop_ftrace_events[];

#endif /* _LINUX_KERNEL_TRACE_H */