/* include/linux/ftrace.h */
  1. /*
  2. * Ftrace header. For implementation details beyond the random comments
  3. * scattered below, see: Documentation/trace/ftrace-design.txt
  4. */
  5. #ifndef _LINUX_FTRACE_H
  6. #define _LINUX_FTRACE_H
  7. #include <linux/trace_clock.h>
  8. #include <linux/kallsyms.h>
  9. #include <linux/linkage.h>
  10. #include <linux/bitops.h>
  11. #include <linux/module.h>
  12. #include <linux/ktime.h>
  13. #include <linux/sched.h>
  14. #include <linux/types.h>
  15. #include <linux/init.h>
  16. #include <linux/fs.h>
  17. #include <asm/ftrace.h>
  18. struct ftrace_hash;
  19. #ifdef CONFIG_FUNCTION_TRACER
  20. extern int ftrace_enabled;
  21. extern int
  22. ftrace_enable_sysctl(struct ctl_table *table, int write,
  23. void __user *buffer, size_t *lenp,
  24. loff_t *ppos);
  25. typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);
  26. enum {
  27. FTRACE_OPS_FL_ENABLED = 1 << 0,
  28. FTRACE_OPS_FL_GLOBAL = 1 << 1,
  29. FTRACE_OPS_FL_DYNAMIC = 1 << 2,
  30. };
  31. struct ftrace_ops {
  32. ftrace_func_t func;
  33. struct ftrace_ops *next;
  34. unsigned long flags;
  35. #ifdef CONFIG_DYNAMIC_FTRACE
  36. struct ftrace_hash *notrace_hash;
  37. struct ftrace_hash *filter_hash;
  38. #endif
  39. };
  40. extern int function_trace_stop;
  41. /*
  42. * Type of the current tracing.
  43. */
  44. enum ftrace_tracing_type_t {
  45. FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */
  46. FTRACE_TYPE_RETURN, /* Hook the return of the function */
  47. };
  48. /* Current tracing type, default is FTRACE_TYPE_ENTER */
  49. extern enum ftrace_tracing_type_t ftrace_tracing_type;
  50. /**
  51. * ftrace_stop - stop function tracer.
  52. *
  53. * A quick way to stop the function tracer. Note this an on off switch,
  54. * it is not something that is recursive like preempt_disable.
  55. * This does not disable the calling of mcount, it only stops the
  56. * calling of functions from mcount.
  57. */
  58. static inline void ftrace_stop(void)
  59. {
  60. function_trace_stop = 1;
  61. }
  62. /**
  63. * ftrace_start - start the function tracer.
  64. *
  65. * This function is the inverse of ftrace_stop. This does not enable
  66. * the function tracing if the function tracer is disabled. This only
  67. * sets the function tracer flag to continue calling the functions
  68. * from mcount.
  69. */
  70. static inline void ftrace_start(void)
  71. {
  72. function_trace_stop = 0;
  73. }
  74. /*
  75. * The ftrace_ops must be a static and should also
  76. * be read_mostly. These functions do modify read_mostly variables
  77. * so use them sparely. Never free an ftrace_op or modify the
  78. * next pointer after it has been registered. Even after unregistering
  79. * it, the next pointer may still be used internally.
  80. */
  81. int register_ftrace_function(struct ftrace_ops *ops);
  82. int unregister_ftrace_function(struct ftrace_ops *ops);
  83. void clear_ftrace_function(void);
  84. extern void ftrace_stub(unsigned long a0, unsigned long a1);
  85. #else /* !CONFIG_FUNCTION_TRACER */
  86. /*
  87. * (un)register_ftrace_function must be a macro since the ops parameter
  88. * must not be evaluated.
  89. */
  90. #define register_ftrace_function(ops) ({ 0; })
  91. #define unregister_ftrace_function(ops) ({ 0; })
  92. static inline void clear_ftrace_function(void) { }
  93. static inline void ftrace_kill(void) { }
  94. static inline void ftrace_stop(void) { }
  95. static inline void ftrace_start(void) { }
  96. #endif /* CONFIG_FUNCTION_TRACER */
  97. #ifdef CONFIG_STACK_TRACER
  98. extern int stack_tracer_enabled;
  99. int
  100. stack_trace_sysctl(struct ctl_table *table, int write,
  101. void __user *buffer, size_t *lenp,
  102. loff_t *ppos);
  103. #endif
  104. struct ftrace_func_command {
  105. struct list_head list;
  106. char *name;
  107. int (*func)(struct ftrace_hash *hash,
  108. char *func, char *cmd,
  109. char *params, int enable);
  110. };
  111. #ifdef CONFIG_DYNAMIC_FTRACE
  112. int ftrace_arch_code_modify_prepare(void);
  113. int ftrace_arch_code_modify_post_process(void);
  114. struct seq_file;
  115. struct ftrace_probe_ops {
  116. void (*func)(unsigned long ip,
  117. unsigned long parent_ip,
  118. void **data);
  119. int (*callback)(unsigned long ip, void **data);
  120. void (*free)(void **data);
  121. int (*print)(struct seq_file *m,
  122. unsigned long ip,
  123. struct ftrace_probe_ops *ops,
  124. void *data);
  125. };
  126. extern int
  127. register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
  128. void *data);
  129. extern void
  130. unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
  131. void *data);
  132. extern void
  133. unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops);
  134. extern void unregister_ftrace_function_probe_all(char *glob);
  135. extern int ftrace_text_reserved(void *start, void *end);
  136. enum {
  137. FTRACE_FL_ENABLED = (1 << 30),
  138. FTRACE_FL_FREE = (1 << 31),
  139. };
  140. #define FTRACE_FL_MASK (0x3UL << 30)
  141. #define FTRACE_REF_MAX ((1 << 30) - 1)
  142. struct dyn_ftrace {
  143. union {
  144. unsigned long ip; /* address of mcount call-site */
  145. struct dyn_ftrace *freelist;
  146. };
  147. union {
  148. unsigned long flags;
  149. struct dyn_ftrace *newlist;
  150. };
  151. struct dyn_arch_ftrace arch;
  152. };
  153. int ftrace_force_update(void);
  154. void ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
  155. int len, int reset);
  156. void ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
  157. int len, int reset);
  158. void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
  159. void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
  160. int register_ftrace_command(struct ftrace_func_command *cmd);
  161. int unregister_ftrace_command(struct ftrace_func_command *cmd);
  162. /* defined in arch */
  163. extern int ftrace_ip_converted(unsigned long ip);
  164. extern int ftrace_dyn_arch_init(void *data);
  165. extern int ftrace_update_ftrace_func(ftrace_func_t func);
  166. extern void ftrace_caller(void);
  167. extern void ftrace_call(void);
  168. extern void mcount_call(void);
  169. #ifndef FTRACE_ADDR
  170. #define FTRACE_ADDR ((unsigned long)ftrace_caller)
  171. #endif
  172. #ifdef CONFIG_FUNCTION_GRAPH_TRACER
  173. extern void ftrace_graph_caller(void);
  174. extern int ftrace_enable_ftrace_graph_caller(void);
  175. extern int ftrace_disable_ftrace_graph_caller(void);
  176. #else
  177. static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
  178. static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
  179. #endif
  180. /**
  181. * ftrace_make_nop - convert code into nop
  182. * @mod: module structure if called by module load initialization
  183. * @rec: the mcount call site record
  184. * @addr: the address that the call site should be calling
  185. *
  186. * This is a very sensitive operation and great care needs
  187. * to be taken by the arch. The operation should carefully
  188. * read the location, check to see if what is read is indeed
  189. * what we expect it to be, and then on success of the compare,
  190. * it should write to the location.
  191. *
  192. * The code segment at @rec->ip should be a caller to @addr
  193. *
  194. * Return must be:
  195. * 0 on success
  196. * -EFAULT on error reading the location
  197. * -EINVAL on a failed compare of the contents
  198. * -EPERM on error writing to the location
  199. * Any other value will be considered a failure.
  200. */
  201. extern int ftrace_make_nop(struct module *mod,
  202. struct dyn_ftrace *rec, unsigned long addr);
  203. /**
  204. * ftrace_make_call - convert a nop call site into a call to addr
  205. * @rec: the mcount call site record
  206. * @addr: the address that the call site should call
  207. *
  208. * This is a very sensitive operation and great care needs
  209. * to be taken by the arch. The operation should carefully
  210. * read the location, check to see if what is read is indeed
  211. * what we expect it to be, and then on success of the compare,
  212. * it should write to the location.
  213. *
  214. * The code segment at @rec->ip should be a nop
  215. *
  216. * Return must be:
  217. * 0 on success
  218. * -EFAULT on error reading the location
  219. * -EINVAL on a failed compare of the contents
  220. * -EPERM on error writing to the location
  221. * Any other value will be considered a failure.
  222. */
  223. extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
  224. /* May be defined in arch */
  225. extern int ftrace_arch_read_dyn_info(char *buf, int size);
  226. extern int skip_trace(unsigned long ip);
  227. extern void ftrace_disable_daemon(void);
  228. extern void ftrace_enable_daemon(void);
  229. #else
  230. static inline int skip_trace(unsigned long ip) { return 0; }
  231. static inline int ftrace_force_update(void) { return 0; }
  232. static inline void ftrace_set_filter(unsigned char *buf, int len, int reset)
  233. {
  234. }
  235. static inline void ftrace_disable_daemon(void) { }
  236. static inline void ftrace_enable_daemon(void) { }
  237. static inline void ftrace_release_mod(struct module *mod) {}
  238. static inline int register_ftrace_command(struct ftrace_func_command *cmd)
  239. {
  240. return -EINVAL;
  241. }
  242. static inline int unregister_ftrace_command(char *cmd_name)
  243. {
  244. return -EINVAL;
  245. }
  246. static inline int ftrace_text_reserved(void *start, void *end)
  247. {
  248. return 0;
  249. }
  250. #endif /* CONFIG_DYNAMIC_FTRACE */
  251. /* totally disable ftrace - can not re-enable after this */
  252. void ftrace_kill(void);
  253. static inline void tracer_disable(void)
  254. {
  255. #ifdef CONFIG_FUNCTION_TRACER
  256. ftrace_enabled = 0;
  257. #endif
  258. }
  259. /*
  260. * Ftrace disable/restore without lock. Some synchronization mechanism
  261. * must be used to prevent ftrace_enabled to be changed between
  262. * disable/restore.
  263. */
  264. static inline int __ftrace_enabled_save(void)
  265. {
  266. #ifdef CONFIG_FUNCTION_TRACER
  267. int saved_ftrace_enabled = ftrace_enabled;
  268. ftrace_enabled = 0;
  269. return saved_ftrace_enabled;
  270. #else
  271. return 0;
  272. #endif
  273. }
  274. static inline void __ftrace_enabled_restore(int enabled)
  275. {
  276. #ifdef CONFIG_FUNCTION_TRACER
  277. ftrace_enabled = enabled;
  278. #endif
  279. }
  280. #ifndef HAVE_ARCH_CALLER_ADDR
  281. # ifdef CONFIG_FRAME_POINTER
  282. # define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
  283. # define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1))
  284. # define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2))
  285. # define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3))
  286. # define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4))
  287. # define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5))
  288. # define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6))
  289. # else
  290. # define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
  291. # define CALLER_ADDR1 0UL
  292. # define CALLER_ADDR2 0UL
  293. # define CALLER_ADDR3 0UL
  294. # define CALLER_ADDR4 0UL
  295. # define CALLER_ADDR5 0UL
  296. # define CALLER_ADDR6 0UL
  297. # endif
  298. #endif /* ifndef HAVE_ARCH_CALLER_ADDR */
  299. #ifdef CONFIG_IRQSOFF_TRACER
  300. extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
  301. extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
  302. #else
  303. static inline void time_hardirqs_on(unsigned long a0, unsigned long a1) { }
  304. static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
  305. #endif
  306. #ifdef CONFIG_PREEMPT_TRACER
  307. extern void trace_preempt_on(unsigned long a0, unsigned long a1);
  308. extern void trace_preempt_off(unsigned long a0, unsigned long a1);
  309. #else
  310. static inline void trace_preempt_on(unsigned long a0, unsigned long a1) { }
  311. static inline void trace_preempt_off(unsigned long a0, unsigned long a1) { }
  312. #endif
  313. #ifdef CONFIG_FTRACE_MCOUNT_RECORD
  314. extern void ftrace_init(void);
  315. #else
  316. static inline void ftrace_init(void) { }
  317. #endif
  318. /*
  319. * Structure that defines an entry function trace.
  320. */
  321. struct ftrace_graph_ent {
  322. unsigned long func; /* Current function */
  323. int depth;
  324. };
  325. /*
  326. * Structure that defines a return function trace.
  327. */
  328. struct ftrace_graph_ret {
  329. unsigned long func; /* Current function */
  330. unsigned long long calltime;
  331. unsigned long long rettime;
  332. /* Number of functions that overran the depth limit for current task */
  333. unsigned long overrun;
  334. int depth;
  335. };
  336. /* Type of the callback handlers for tracing function graph*/
  337. typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
  338. typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
  339. #ifdef CONFIG_FUNCTION_GRAPH_TRACER
  340. /* for init task */
  341. #define INIT_FTRACE_GRAPH .ret_stack = NULL,
  342. /*
  343. * Stack of return addresses for functions
  344. * of a thread.
  345. * Used in struct thread_info
  346. */
  347. struct ftrace_ret_stack {
  348. unsigned long ret;
  349. unsigned long func;
  350. unsigned long long calltime;
  351. unsigned long long subtime;
  352. unsigned long fp;
  353. };
  354. /*
  355. * Primary handler of a function return.
  356. * It relays on ftrace_return_to_handler.
  357. * Defined in entry_32/64.S
  358. */
  359. extern void return_to_handler(void);
  360. extern int
  361. ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
  362. unsigned long frame_pointer);
  363. /*
  364. * Sometimes we don't want to trace a function with the function
  365. * graph tracer but we want them to keep traced by the usual function
  366. * tracer if the function graph tracer is not configured.
  367. */
  368. #define __notrace_funcgraph notrace
  369. /*
  370. * We want to which function is an entrypoint of a hardirq.
  371. * That will help us to put a signal on output.
  372. */
  373. #define __irq_entry __attribute__((__section__(".irqentry.text")))
  374. /* Limits of hardirq entrypoints */
  375. extern char __irqentry_text_start[];
  376. extern char __irqentry_text_end[];
  377. #define FTRACE_RETFUNC_DEPTH 50
  378. #define FTRACE_RETSTACK_ALLOC_SIZE 32
  379. extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
  380. trace_func_graph_ent_t entryfunc);
  381. extern void ftrace_graph_stop(void);
  382. /* The current handlers in use */
  383. extern trace_func_graph_ret_t ftrace_graph_return;
  384. extern trace_func_graph_ent_t ftrace_graph_entry;
  385. extern void unregister_ftrace_graph(void);
  386. extern void ftrace_graph_init_task(struct task_struct *t);
  387. extern void ftrace_graph_exit_task(struct task_struct *t);
  388. extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);
  389. static inline int task_curr_ret_stack(struct task_struct *t)
  390. {
  391. return t->curr_ret_stack;
  392. }
  393. static inline void pause_graph_tracing(void)
  394. {
  395. atomic_inc(&current->tracing_graph_pause);
  396. }
  397. static inline void unpause_graph_tracing(void)
  398. {
  399. atomic_dec(&current->tracing_graph_pause);
  400. }
  401. #else /* !CONFIG_FUNCTION_GRAPH_TRACER */
  402. #define __notrace_funcgraph
  403. #define __irq_entry
  404. #define INIT_FTRACE_GRAPH
  405. static inline void ftrace_graph_init_task(struct task_struct *t) { }
  406. static inline void ftrace_graph_exit_task(struct task_struct *t) { }
  407. static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }
  408. static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc,
  409. trace_func_graph_ent_t entryfunc)
  410. {
  411. return -1;
  412. }
  413. static inline void unregister_ftrace_graph(void) { }
  414. static inline int task_curr_ret_stack(struct task_struct *tsk)
  415. {
  416. return -1;
  417. }
  418. static inline void pause_graph_tracing(void) { }
  419. static inline void unpause_graph_tracing(void) { }
  420. #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
  421. #ifdef CONFIG_TRACING
  422. /* flags for current->trace */
  423. enum {
  424. TSK_TRACE_FL_TRACE_BIT = 0,
  425. TSK_TRACE_FL_GRAPH_BIT = 1,
  426. };
  427. enum {
  428. TSK_TRACE_FL_TRACE = 1 << TSK_TRACE_FL_TRACE_BIT,
  429. TSK_TRACE_FL_GRAPH = 1 << TSK_TRACE_FL_GRAPH_BIT,
  430. };
  431. static inline void set_tsk_trace_trace(struct task_struct *tsk)
  432. {
  433. set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
  434. }
  435. static inline void clear_tsk_trace_trace(struct task_struct *tsk)
  436. {
  437. clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
  438. }
  439. static inline int test_tsk_trace_trace(struct task_struct *tsk)
  440. {
  441. return tsk->trace & TSK_TRACE_FL_TRACE;
  442. }
  443. static inline void set_tsk_trace_graph(struct task_struct *tsk)
  444. {
  445. set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
  446. }
  447. static inline void clear_tsk_trace_graph(struct task_struct *tsk)
  448. {
  449. clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
  450. }
  451. static inline int test_tsk_trace_graph(struct task_struct *tsk)
  452. {
  453. return tsk->trace & TSK_TRACE_FL_GRAPH;
  454. }
  455. enum ftrace_dump_mode;
  456. extern enum ftrace_dump_mode ftrace_dump_on_oops;
  457. #ifdef CONFIG_PREEMPT
  458. #define INIT_TRACE_RECURSION .trace_recursion = 0,
  459. #endif
  460. #endif /* CONFIG_TRACING */
  461. #ifndef INIT_TRACE_RECURSION
  462. #define INIT_TRACE_RECURSION
  463. #endif
  464. #ifdef CONFIG_FTRACE_SYSCALLS
  465. unsigned long arch_syscall_addr(int nr);
  466. #endif /* CONFIG_FTRACE_SYSCALLS */
  467. #endif /* _LINUX_FTRACE_H */