#ifndef _LINUX_FTRACE_H
#define _LINUX_FTRACE_H

#include <linux/linkage.h>
#include <linux/fs.h>
#include <linux/ktime.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/kallsyms.h>
#include <linux/bitops.h>
#include <linux/sched.h>

#ifdef CONFIG_FUNCTION_TRACER

extern int ftrace_enabled;
extern int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *filp, void __user *buffer, size_t *lenp,
		     loff_t *ppos);

typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);

struct ftrace_ops {
	ftrace_func_t func;
	struct ftrace_ops *next;
};

extern int function_trace_stop;

/*
 * Type of the current tracing.
 */
enum ftrace_tracing_type_t {
	FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */
	FTRACE_TYPE_RETURN,    /* Hook the return of the function */
};

/* Current tracing type, default is FTRACE_TYPE_ENTER */
extern enum ftrace_tracing_type_t ftrace_tracing_type;
/**
 * ftrace_stop - stop function tracer.
 *
 * A quick way to stop the function tracer. Note this is an on/off switch;
 * it is not recursive like preempt_disable.
 * This does not disable the calling of mcount; it only stops the
 * calling of functions from mcount.
 */
static inline void ftrace_stop(void)
{
	function_trace_stop = 1;
}
/**
 * ftrace_start - start the function tracer.
 *
 * This function is the inverse of ftrace_stop. It does not enable
 * function tracing if the function tracer is disabled; it only
 * sets the function tracer flag to continue calling the functions
 * from mcount.
 */
static inline void ftrace_start(void)
{
	function_trace_stop = 0;
}
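
/*
 * Illustrative only (a sketch, not part of the original header): pairing
 * ftrace_stop()/ftrace_start() around a section that must not be traced.
 * snapshot_machine_state() is a hypothetical placeholder, not a kernel API.
 *
 *	ftrace_stop();
 *	snapshot_machine_state();	// hypothetical helper
 *	ftrace_start();
 *
 * Because this is a plain on/off flag rather than a nesting counter,
 * callers must not rely on nested stop/start pairs balancing out.
 */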

/*
 * The ftrace_ops must be static and should also be read_mostly.
 * The register/unregister functions modify read_mostly variables,
 * so use them sparingly. Never free an ftrace_ops or modify its
 * next pointer after it has been registered. Even after unregistering
 * it, the next pointer may still be used internally.
 */
int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);
void clear_ftrace_function(void);
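
/*
 * Illustrative only (a sketch, not from the original header): registering a
 * function-trace callback. The callback and its ftrace_ops are hypothetical;
 * only register_ftrace_function()/unregister_ftrace_function() and the
 * ftrace_func_t signature come from this header.
 *
 *	static void my_trace_callback(unsigned long ip, unsigned long parent_ip)
 *	{
 *		// ip is the traced function, parent_ip its caller
 *	}
 *
 *	static struct ftrace_ops my_trace_ops __read_mostly = {
 *		.func = my_trace_callback,
 *	};
 *
 *	// module init / exit:
 *	register_ftrace_function(&my_trace_ops);
 *	...
 *	unregister_ftrace_function(&my_trace_ops);
 *
 * Note my_trace_ops is static, as required above, and is never freed.
 */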

extern void ftrace_stub(unsigned long a0, unsigned long a1);

#else /* !CONFIG_FUNCTION_TRACER */
# define register_ftrace_function(ops) do { } while (0)
# define unregister_ftrace_function(ops) do { } while (0)
# define clear_ftrace_function() do { } while (0)
static inline void ftrace_kill(void) { }
static inline void ftrace_stop(void) { }
static inline void ftrace_start(void) { }
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_DYNAMIC_FTRACE
/* asm/ftrace.h must be defined for archs supporting dynamic ftrace */
#include <asm/ftrace.h>

enum {
	FTRACE_FL_FREE      = (1 << 0),
	FTRACE_FL_FAILED    = (1 << 1),
	FTRACE_FL_FILTER    = (1 << 2),
	FTRACE_FL_ENABLED   = (1 << 3),
	FTRACE_FL_NOTRACE   = (1 << 4),
	FTRACE_FL_CONVERTED = (1 << 5),
	FTRACE_FL_FROZEN    = (1 << 6),
};

struct dyn_ftrace {
	struct list_head list;
	unsigned long ip; /* address of mcount call-site */
	unsigned long flags;
	struct dyn_arch_ftrace arch;
};

int ftrace_force_update(void);
void ftrace_set_filter(unsigned char *buf, int len, int reset);

/* defined in arch */
extern int ftrace_ip_converted(unsigned long ip);
extern int ftrace_dyn_arch_init(void *data);
extern int ftrace_update_ftrace_func(ftrace_func_t func);
extern void ftrace_caller(void);
extern void ftrace_call(void);
extern void mcount_call(void);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern void ftrace_graph_caller(void);
extern int ftrace_enable_ftrace_graph_caller(void);
extern int ftrace_disable_ftrace_graph_caller(void);
#else
static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
#endif

/**
 * ftrace_make_nop - convert code into nop
 * @mod: module structure if called by module load initialization
 * @rec: the mcount call site record
 * @addr: the address that the call site should be calling
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a call to @addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_nop(struct module *mod,
			   struct dyn_ftrace *rec, unsigned long addr);

/**
 * ftrace_make_call - convert a nop call site into a call to addr
 * @rec: the mcount call site record
 * @addr: the address that the call site should call
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a nop
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
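
/*
 * Illustrative only (a sketch, not part of this header): the read, compare,
 * then write pattern the comments above describe, with the documented error
 * codes. The insn_t type and the read_insn(), expected_call_insn(),
 * nop_insn() and write_insn() helpers are hypothetical stand-ins for
 * whatever an architecture actually uses.
 *
 *	int ftrace_make_nop(struct module *mod,
 *			    struct dyn_ftrace *rec, unsigned long addr)
 *	{
 *		insn_t old;
 *
 *		if (read_insn(rec->ip, &old))
 *			return -EFAULT;	// could not read the location
 *		if (old != expected_call_insn(rec->ip, addr))
 *			return -EINVAL;	// contents are not what we expect
 *		if (write_insn(rec->ip, nop_insn()))
 *			return -EPERM;	// could not write the location
 *		return 0;
 *	}
 */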

/* May be defined in arch */
extern int ftrace_arch_read_dyn_info(char *buf, int size);

extern int skip_trace(unsigned long ip);

extern void ftrace_release(void *start, unsigned long size);
extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void);

#else
# define skip_trace(ip) ({ 0; })
# define ftrace_force_update() ({ 0; })
# define ftrace_set_filter(buf, len, reset) do { } while (0)
# define ftrace_disable_daemon() do { } while (0)
# define ftrace_enable_daemon() do { } while (0)
static inline void ftrace_release(void *start, unsigned long size) { }
#endif /* CONFIG_DYNAMIC_FTRACE */

/* totally disable ftrace - cannot re-enable after this */
void ftrace_kill(void);

static inline void tracer_disable(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = 0;
#endif
}

/*
 * Ftrace disable/restore without lock. Some synchronization mechanism
 * must be used to prevent ftrace_enabled from being changed between
 * disable/restore.
 */
static inline int __ftrace_enabled_save(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	int saved_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;
	return saved_ftrace_enabled;
#else
	return 0;
#endif
}

static inline void __ftrace_enabled_restore(int enabled)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = enabled;
#endif
}
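
/*
 * Illustrative only (a sketch, not from the original header): the intended
 * save/restore pairing. do_untraceable_work() is a hypothetical placeholder;
 * the caller must provide its own synchronization so that nothing else
 * changes ftrace_enabled in between.
 *
 *	int saved = __ftrace_enabled_save();
 *	do_untraceable_work();		// hypothetical
 *	__ftrace_enabled_restore(saved);
 */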

#ifdef CONFIG_FRAME_POINTER
/* TODO: need to fix this for ARM */
# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
# define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1))
# define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2))
# define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3))
# define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4))
# define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5))
# define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6))
#else
# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
# define CALLER_ADDR1 0UL
# define CALLER_ADDR2 0UL
# define CALLER_ADDR3 0UL
# define CALLER_ADDR4 0UL
# define CALLER_ADDR5 0UL
# define CALLER_ADDR6 0UL
#endif

#ifdef CONFIG_IRQSOFF_TRACER
extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
#else
# define time_hardirqs_on(a0, a1) do { } while (0)
# define time_hardirqs_off(a0, a1) do { } while (0)
#endif

#ifdef CONFIG_PREEMPT_TRACER
extern void trace_preempt_on(unsigned long a0, unsigned long a1);
extern void trace_preempt_off(unsigned long a0, unsigned long a1);
#else
# define trace_preempt_on(a0, a1) do { } while (0)
# define trace_preempt_off(a0, a1) do { } while (0)
#endif

#ifdef CONFIG_TRACING
extern int ftrace_dump_on_oops;

extern void tracing_start(void);
extern void tracing_stop(void);
extern void ftrace_off_permanent(void);

extern void
ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);

/**
 * ftrace_printk - printf formatting in the ftrace buffer
 * @fmt: the printf format for printing
 *
 * Note: __ftrace_printk is an internal function for ftrace_printk and
 * the @ip is passed in via the ftrace_printk macro.
 *
 * This function allows a kernel developer to debug fast path sections
 * that printk is not appropriate for. By scattering printk-like
 * tracing statements through the code, a developer can quickly see
 * where problems are occurring.
 *
 * This is intended as a debugging tool for the developer only.
 * Please refrain from leaving ftrace_printks scattered around in
 * your code.
 */
# define ftrace_printk(fmt...) __ftrace_printk(_THIS_IP_, fmt)
extern int
__ftrace_printk(unsigned long ip, const char *fmt, ...)
	__attribute__ ((format (printf, 2, 3)));
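
/*
 * Illustrative only (a sketch, not from the original header): a temporary
 * debug print into the ftrace ring buffer from a hot path. The qid and
 * depth variables are hypothetical; the output is read back through the
 * trace file, not the console log.
 *
 *	ftrace_printk("queue %d depth is now %u\n", qid, depth);
 *
 * Remember to remove such calls before the code is merged.
 */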
extern void ftrace_dump(void);
#else
static inline void
ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { }
static inline int
ftrace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 0)));

static inline void tracing_start(void) { }
static inline void tracing_stop(void) { }
static inline void ftrace_off_permanent(void) { }
static inline int
ftrace_printk(const char *fmt, ...)
{
	return 0;
}
static inline void ftrace_dump(void) { }
#endif

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
extern void ftrace_init(void);
extern void ftrace_init_module(struct module *mod,
			       unsigned long *start, unsigned long *end);
#else
static inline void ftrace_init(void) { }
static inline void
ftrace_init_module(struct module *mod,
		   unsigned long *start, unsigned long *end) { }
#endif

enum {
	POWER_NONE = 0,
	POWER_CSTATE = 1,
	POWER_PSTATE = 2,
};

struct power_trace {
#ifdef CONFIG_POWER_TRACER
	ktime_t stamp;
	ktime_t end;
	int type;
	int state;
#endif
};

#ifdef CONFIG_POWER_TRACER
extern void trace_power_start(struct power_trace *it, unsigned int type,
			      unsigned int state);
extern void trace_power_mark(struct power_trace *it, unsigned int type,
			     unsigned int state);
extern void trace_power_end(struct power_trace *it);
#else
static inline void trace_power_start(struct power_trace *it, unsigned int type,
				     unsigned int state) { }
static inline void trace_power_mark(struct power_trace *it, unsigned int type,
				    unsigned int state) { }
static inline void trace_power_end(struct power_trace *it) { }
#endif
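
/*
 * Illustrative only (a sketch, not from the original header): how a power
 * management path might wrap a low-power transition with these hooks. The
 * enter_cstate() call and target_state variable are hypothetical.
 *
 *	struct power_trace it;
 *
 *	trace_power_start(&it, POWER_CSTATE, target_state);
 *	enter_cstate(target_state);	// hypothetical
 *	trace_power_end(&it);
 *
 * trace_power_mark() is intended for a single point-in-time event (such as
 * a P-state change) rather than a start/end pair.
 */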

/*
 * Structure that defines an entry function trace.
 */
struct ftrace_graph_ent {
	unsigned long func; /* Current function */
	int depth;
};

/*
 * Structure that defines a return function trace.
 */
struct ftrace_graph_ret {
	unsigned long func; /* Current function */
	unsigned long long calltime;
	unsigned long long rettime;
	/* Number of functions that overran the depth limit for current task */
	unsigned long overrun;
	int depth;
};

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/*
 * Sometimes we don't want to trace a function with the function
 * graph tracer but we still want it to be traced by the usual function
 * tracer if the function graph tracer is not configured.
 */
#define __notrace_funcgraph notrace

#define FTRACE_RETFUNC_DEPTH 50
#define FTRACE_RETSTACK_ALLOC_SIZE 32

/* Type of the callback handlers for function graph tracing */
typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */

extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
				 trace_func_graph_ent_t entryfunc);

extern void ftrace_graph_stop(void);

/* The current handlers in use */
extern trace_func_graph_ret_t ftrace_graph_return;
extern trace_func_graph_ent_t ftrace_graph_entry;

extern void unregister_ftrace_graph(void);

extern void ftrace_graph_init_task(struct task_struct *t);
extern void ftrace_graph_exit_task(struct task_struct *t);
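
/*
 * Illustrative only (a sketch, not from the original header): registering
 * entry/return handlers for the graph tracer. Both handler bodies are
 * hypothetical; the signatures and register/unregister calls come from the
 * declarations above. In this version, returning non-zero from the entry
 * handler means "trace this call (and hook its return)", returning 0 skips
 * the call.
 *
 *	static int my_graph_entry(struct ftrace_graph_ent *ent)
 *	{
 *		return ent->depth < 3;	// only follow shallow calls
 *	}
 *
 *	static void my_graph_return(struct ftrace_graph_ret *ret)
 *	{
 *		// ret->rettime - ret->calltime is the time spent in the call
 *	}
 *
 *	register_ftrace_graph(my_graph_return, my_graph_entry);
 *	...
 *	unregister_ftrace_graph();
 */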

static inline int task_curr_ret_stack(struct task_struct *t)
{
	return t->curr_ret_stack;
}
#else

#define __notrace_funcgraph

static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }

static inline int task_curr_ret_stack(struct task_struct *tsk)
{
	return -1;
}
#endif

#ifdef CONFIG_TRACING
#include <linux/sched.h>

/* flags for current->trace */
enum {
	TSK_TRACE_FL_TRACE_BIT = 0,
	TSK_TRACE_FL_GRAPH_BIT = 1,
};
enum {
	TSK_TRACE_FL_TRACE = 1 << TSK_TRACE_FL_TRACE_BIT,
	TSK_TRACE_FL_GRAPH = 1 << TSK_TRACE_FL_GRAPH_BIT,
};

static inline void set_tsk_trace_trace(struct task_struct *tsk)
{
	set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_trace(struct task_struct *tsk)
{
	clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline int test_tsk_trace_trace(struct task_struct *tsk)
{
	return tsk->trace & TSK_TRACE_FL_TRACE;
}

static inline void set_tsk_trace_graph(struct task_struct *tsk)
{
	set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_graph(struct task_struct *tsk)
{
	clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline int test_tsk_trace_graph(struct task_struct *tsk)
{
	return tsk->trace & TSK_TRACE_FL_GRAPH;
}

#endif /* CONFIG_TRACING */

#endif /* _LINUX_FTRACE_H */