ftrace.h

/*
 * Ftrace header. For implementation details beyond the random comments
 * scattered below, see: Documentation/trace/ftrace-design.txt
 */

#ifndef _LINUX_FTRACE_H
#define _LINUX_FTRACE_H

#include <linux/trace_clock.h>
#include <linux/kallsyms.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/ptrace.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/fs.h>

#include <asm/ftrace.h>

/*
 * If the arch supports passing the variable contents of
 * function_trace_op as the third parameter back from the
 * mcount call, then the arch should define this as 1.
 */
#ifndef ARCH_SUPPORTS_FTRACE_OPS
#define ARCH_SUPPORTS_FTRACE_OPS 0
#endif

/*
 * If the arch's mcount caller does not support all of ftrace's
 * features, then it must call an indirect function that
 * does, or at least does enough to prevent any unwelcome side effects.
 */
#if !defined(CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST) || \
        !ARCH_SUPPORTS_FTRACE_OPS
# define FTRACE_FORCE_LIST_FUNC 1
#else
# define FTRACE_FORCE_LIST_FUNC 0
#endif

struct module;
struct ftrace_hash;

#ifdef CONFIG_FUNCTION_TRACER

extern int ftrace_enabled;
extern int
ftrace_enable_sysctl(struct ctl_table *table, int write,
                     void __user *buffer, size_t *lenp,
                     loff_t *ppos);

struct ftrace_ops;

typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
                              struct ftrace_ops *op, struct pt_regs *regs);

/*
 * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
 * set in the flags member.
 *
 * ENABLED - set/unset when ftrace_ops is registered/unregistered
 * GLOBAL  - set manually by ftrace_ops user to denote the ftrace_ops
 *           is part of the global tracers sharing the same filter
 *           via set_ftrace_* debugfs files.
 * DYNAMIC - set when ftrace_ops is registered to denote dynamically
 *           allocated ftrace_ops which need special care
 * CONTROL - set manually by ftrace_ops user to denote the ftrace_ops
 *           can be controlled by the following calls:
 *             ftrace_function_local_enable
 *             ftrace_function_local_disable
 * SAVE_REGS - The ftrace_ops wants regs saved at each function called
 *           and passed to the callback. If this flag is set, but the
 *           architecture does not support passing regs
 *           (ARCH_SUPPORTS_FTRACE_SAVE_REGS is not defined), then the
 *           ftrace_ops will fail to register, unless the next flag
 *           is set.
 * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the
 *           handler can handle an arch that does not save regs
 *           (the handler tests if regs == NULL), then it can set
 *           this flag instead. It will not fail registering the ftrace_ops,
 *           but the regs field will be NULL if the arch does not support
 *           passing regs to the handler.
 *           Note, if this flag is set, the SAVE_REGS flag will automatically
 *           get set upon registering the ftrace_ops, if the arch supports it.
 * RECURSION_SAFE - The ftrace_ops can set this to tell the ftrace infrastructure
 *           that the callback has its own recursion protection. If it does
 *           not set this, then the ftrace infrastructure will add recursion
 *           protection for the caller.
 */
enum {
        FTRACE_OPS_FL_ENABLED                   = 1 << 0,
        FTRACE_OPS_FL_GLOBAL                    = 1 << 1,
        FTRACE_OPS_FL_DYNAMIC                   = 1 << 2,
        FTRACE_OPS_FL_CONTROL                   = 1 << 3,
        FTRACE_OPS_FL_SAVE_REGS                 = 1 << 4,
        FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED    = 1 << 5,
        FTRACE_OPS_FL_RECURSION_SAFE            = 1 << 6,
};

struct ftrace_ops {
        ftrace_func_t                   func;
        struct ftrace_ops               *next;
        unsigned long                   flags;
        int __percpu                    *disabled;
#ifdef CONFIG_DYNAMIC_FTRACE
        struct ftrace_hash              *notrace_hash;
        struct ftrace_hash              *filter_hash;
#endif
};

extern int function_trace_stop;

/*
 * Type of the current tracing.
 */
enum ftrace_tracing_type_t {
        FTRACE_TYPE_ENTER = 0,  /* Hook the call of the function */
        FTRACE_TYPE_RETURN,     /* Hook the return of the function */
};

/* Current tracing type, default is FTRACE_TYPE_ENTER */
extern enum ftrace_tracing_type_t ftrace_tracing_type;

/**
 * ftrace_stop - stop function tracer.
 *
 * A quick way to stop the function tracer. Note this is an on/off switch;
 * it is not something that is recursive like preempt_disable.
 * This does not disable the calling of mcount, it only stops the
 * calling of functions from mcount.
 */
static inline void ftrace_stop(void)
{
        function_trace_stop = 1;
}

/**
 * ftrace_start - start the function tracer.
 *
 * This function is the inverse of ftrace_stop. This does not enable
 * the function tracing if the function tracer is disabled. This only
 * sets the function tracer flag to continue calling the functions
 * from mcount.
 */
static inline void ftrace_start(void)
{
        function_trace_stop = 0;
}

/*
 * The ftrace_ops must be static and should also be read_mostly.
 * These functions do modify read_mostly variables, so use them
 * sparingly. Never free an ftrace_ops or modify the next pointer
 * after it has been registered. Even after unregistering it, the
 * next pointer may still be used internally.
 */
int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);
void clear_ftrace_function(void);
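/*
 * Illustrative sketch only, not part of this header: one way a user of the
 * declarations above might hook every traced function. The names
 * my_callback and my_ops are hypothetical.
 *
 *      static void my_callback(unsigned long ip, unsigned long parent_ip,
 *                              struct ftrace_ops *op, struct pt_regs *regs)
 *      {
 *              trace_printk("%pS called from %pS\n",
 *                           (void *)ip, (void *)parent_ip);
 *      }
 *
 *      static struct ftrace_ops my_ops __read_mostly = {
 *              .func   = my_callback,
 *              .flags  = FTRACE_OPS_FL_RECURSION_SAFE,
 *      };
 *
 * The callback is enabled with register_ftrace_function(&my_ops) and
 * disabled with unregister_ftrace_function(&my_ops); as noted above,
 * my_ops must stay allocated even after it is unregistered.
 */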
/**
 * ftrace_function_local_enable - enable controlled ftrace_ops on current cpu
 *
 * This function enables tracing on current cpu by decreasing
 * the per cpu control variable.
 * It must be called with preemption disabled and only on ftrace_ops
 * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
 */
static inline void ftrace_function_local_enable(struct ftrace_ops *ops)
{
        if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))
                return;

        (*this_cpu_ptr(ops->disabled))--;
}

/**
 * ftrace_function_local_disable - disable controlled ftrace_ops on current cpu
 *
 * This function disables tracing on current cpu by increasing
 * the per cpu control variable.
 * It must be called with preemption disabled and only on ftrace_ops
 * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
 */
static inline void ftrace_function_local_disable(struct ftrace_ops *ops)
{
        if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))
                return;

        (*this_cpu_ptr(ops->disabled))++;
}

/**
 * ftrace_function_local_disabled - returns ftrace_ops disabled value
 *                                  on current cpu
 *
 * This function returns value of ftrace_ops::disabled on current cpu.
 * It must be called with preemption disabled and only on ftrace_ops
 * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
 */
static inline int ftrace_function_local_disabled(struct ftrace_ops *ops)
{
        WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL));
        return *this_cpu_ptr(ops->disabled);
}
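/*
 * Illustrative sketch only: the owner of a FTRACE_OPS_FL_CONTROL ops
 * (such as the hypothetical my_ops above, with FTRACE_OPS_FL_CONTROL set
 * in its flags) toggles tracing for the local CPU with the helpers above
 * while preemption is disabled:
 *
 *      preempt_disable();
 *      ftrace_function_local_disable(&my_ops);  // stop tracing on this CPU
 *      ...                                      // work that must not be traced
 *      ftrace_function_local_enable(&my_ops);   // resume tracing on this CPU
 *      preempt_enable();
 *
 * ftrace_function_local_disabled(&my_ops) returns a non-zero value while
 * tracing is disabled on the current CPU.
 */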
extern void ftrace_stub(unsigned long a0, unsigned long a1,
                        struct ftrace_ops *op, struct pt_regs *regs);

#else /* !CONFIG_FUNCTION_TRACER */
/*
 * (un)register_ftrace_function must be a macro since the ops parameter
 * must not be evaluated.
 */
#define register_ftrace_function(ops) ({ 0; })
#define unregister_ftrace_function(ops) ({ 0; })
static inline void clear_ftrace_function(void) { }
static inline void ftrace_kill(void) { }
static inline void ftrace_stop(void) { }
static inline void ftrace_start(void) { }
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_STACK_TRACER
extern int stack_tracer_enabled;
int
stack_trace_sysctl(struct ctl_table *table, int write,
                   void __user *buffer, size_t *lenp,
                   loff_t *ppos);
#endif

struct ftrace_func_command {
        struct list_head        list;
        char                    *name;
        int                     (*func)(struct ftrace_hash *hash,
                                        char *func, char *cmd,
                                        char *params, int enable);
};

#ifdef CONFIG_DYNAMIC_FTRACE

int ftrace_arch_code_modify_prepare(void);
int ftrace_arch_code_modify_post_process(void);

void ftrace_bug(int err, unsigned long ip);

struct seq_file;

struct ftrace_probe_ops {
        void                    (*func)(unsigned long ip,
                                        unsigned long parent_ip,
                                        void **data);
        int                     (*callback)(unsigned long ip, void **data);
        void                    (*free)(void **data);
        int                     (*print)(struct seq_file *m,
                                         unsigned long ip,
                                         struct ftrace_probe_ops *ops,
                                         void *data);
};

extern int
register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
                               void *data);
extern void
unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
                                 void *data);
extern void
unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops);
extern void unregister_ftrace_function_probe_all(char *glob);

extern int ftrace_text_reserved(void *start, void *end);

/*
 * The dyn_ftrace record's flags field is split into two parts.
 * The first part, which is '0-FTRACE_REF_MAX', is a counter of
 * the number of callbacks that have registered the function that
 * the dyn_ftrace descriptor represents.
 *
 * The second part is a mask:
 *  ENABLED - the function is being traced
 *  REGS    - the record wants the function to save regs
 *  REGS_EN - the function is set up to save regs.
 *
 * When a new ftrace_ops is registered and wants a function to save
 * pt_regs, the rec->flags REGS is set. When the function has been
 * set up to save regs, the REGS_EN flag is set. Once a function
 * starts saving regs it will do so until all ftrace_ops are removed
 * from tracing that function.
 */
enum {
        FTRACE_FL_ENABLED       = (1UL << 29),
        FTRACE_FL_REGS          = (1UL << 30),
        FTRACE_FL_REGS_EN       = (1UL << 31)
};

#define FTRACE_FL_MASK          (0x7UL << 29)
#define FTRACE_REF_MAX          ((1UL << 29) - 1)
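/*
 * For example (illustrative only), the two halves of a record's flags can
 * be read back like this:
 *
 *      unsigned long ref_count = rec->flags & FTRACE_REF_MAX;
 *      bool traced     = rec->flags & FTRACE_FL_ENABLED;
 *      bool wants_regs = rec->flags & FTRACE_FL_REGS;
 *      bool saves_regs = rec->flags & FTRACE_FL_REGS_EN;
 */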
struct dyn_ftrace {
        union {
                unsigned long           ip; /* address of mcount call-site */
                struct dyn_ftrace       *freelist;
        };
        unsigned long           flags;
        struct dyn_arch_ftrace  arch;
};

int ftrace_force_update(void);
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
                      int len, int reset);
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
                       int len, int reset);
void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
void ftrace_free_filter(struct ftrace_ops *ops);

int register_ftrace_command(struct ftrace_func_command *cmd);
int unregister_ftrace_command(struct ftrace_func_command *cmd);

enum {
        FTRACE_UPDATE_CALLS             = (1 << 0),
        FTRACE_DISABLE_CALLS            = (1 << 1),
        FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
        FTRACE_START_FUNC_RET           = (1 << 3),
        FTRACE_STOP_FUNC_RET            = (1 << 4),
};

/*
 * The FTRACE_UPDATE_* enum is used to pass information back
 * from the ftrace_update_record() and ftrace_test_record()
 * functions. These are called by the code update routines
 * to find out what is to be done for a given function.
 *
 *  IGNORE           - The function is already what we want it to be
 *  MAKE_CALL        - Start tracing the function
 *  MODIFY_CALL      - Stop saving regs for the function
 *  MODIFY_CALL_REGS - Start saving regs for the function
 *  MAKE_NOP         - Stop tracing the function
 */
enum {
        FTRACE_UPDATE_IGNORE,
        FTRACE_UPDATE_MAKE_CALL,
        FTRACE_UPDATE_MODIFY_CALL,
        FTRACE_UPDATE_MODIFY_CALL_REGS,
        FTRACE_UPDATE_MAKE_NOP,
};
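/*
 * Illustrative only: a code update routine typically switches on the value
 * returned by ftrace_update_record() for each record, roughly like:
 *
 *      switch (ftrace_update_record(rec, enable)) {
 *      case FTRACE_UPDATE_IGNORE:
 *              return 0;
 *      case FTRACE_UPDATE_MAKE_CALL:
 *              return ftrace_make_call(rec, FTRACE_ADDR);
 *      case FTRACE_UPDATE_MAKE_NOP:
 *              return ftrace_make_nop(NULL, rec, FTRACE_ADDR);
 *      }
 *
 * FTRACE_ADDR, ftrace_make_call() and ftrace_make_nop() are declared
 * further down in this header; the MODIFY_CALL* cases are omitted here.
 */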
enum {
        FTRACE_ITER_FILTER      = (1 << 0),
        FTRACE_ITER_NOTRACE     = (1 << 1),
        FTRACE_ITER_PRINTALL    = (1 << 2),
        FTRACE_ITER_DO_HASH     = (1 << 3),
        FTRACE_ITER_HASH        = (1 << 4),
        FTRACE_ITER_ENABLED     = (1 << 5),
};

void arch_ftrace_update_code(int command);

struct ftrace_rec_iter;

struct ftrace_rec_iter *ftrace_rec_iter_start(void);
struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter);
struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);

#define for_ftrace_rec_iter(iter)               \
        for (iter = ftrace_rec_iter_start();    \
             iter;                              \
             iter = ftrace_rec_iter_next(iter))
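/*
 * Illustrative use of the record iterator (the pattern arch code follows
 * when it needs to walk every mcount call site):
 *
 *      struct ftrace_rec_iter *iter;
 *      struct dyn_ftrace *rec;
 *
 *      for_ftrace_rec_iter(iter) {
 *              rec = ftrace_rec_iter_record(iter);
 *              // rec->ip is one mcount call site; inspect or patch it here
 *      }
 */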
int ftrace_update_record(struct dyn_ftrace *rec, int enable);
int ftrace_test_record(struct dyn_ftrace *rec, int enable);
void ftrace_run_stop_machine(int command);
unsigned long ftrace_location(unsigned long ip);

extern ftrace_func_t ftrace_trace_function;

int ftrace_regex_open(struct ftrace_ops *ops, int flag,
                      struct inode *inode, struct file *file);
ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
                            size_t cnt, loff_t *ppos);
ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
                             size_t cnt, loff_t *ppos);
loff_t ftrace_regex_lseek(struct file *file, loff_t offset, int origin);
int ftrace_regex_release(struct inode *inode, struct file *file);

void __init
ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);

/* defined in arch */
extern int ftrace_ip_converted(unsigned long ip);
extern int ftrace_dyn_arch_init(void *data);
extern void ftrace_replace_code(int enable);
extern int ftrace_update_ftrace_func(ftrace_func_t func);
extern void ftrace_caller(void);
extern void ftrace_regs_caller(void);
extern void ftrace_call(void);
extern void ftrace_regs_call(void);
extern void mcount_call(void);

void ftrace_modify_all_code(int command);

#ifndef FTRACE_ADDR
#define FTRACE_ADDR ((unsigned long)ftrace_caller)
#endif
#ifndef FTRACE_REGS_ADDR
#ifdef ARCH_SUPPORTS_FTRACE_SAVE_REGS
# define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
#else
# define FTRACE_REGS_ADDR FTRACE_ADDR
#endif
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern void ftrace_graph_caller(void);
extern int ftrace_enable_ftrace_graph_caller(void);
extern int ftrace_disable_ftrace_graph_caller(void);
#else
static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
#endif

/**
 * ftrace_make_nop - convert code into nop
 * @mod: module structure if called by module load initialization
 * @rec: the mcount call site record
 * @addr: the address that the call site should be calling
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_nop(struct module *mod,
                           struct dyn_ftrace *rec, unsigned long addr);

/**
 * ftrace_make_call - convert a nop call site into a call to addr
 * @rec: the mcount call site record
 * @addr: the address that the call site should call
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a nop
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
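/*
 * Illustrative sketch only of the read/compare/write sequence the two
 * kernel-doc comments above describe; real arch implementations differ.
 * MCOUNT_INSN_SIZE comes from the arch's <asm/ftrace.h> on architectures
 * that define it, and "expected"/"replacement" stand for the instruction
 * bytes the arch computes for this call site.
 *
 *      unsigned char cur[MCOUNT_INSN_SIZE];
 *
 *      if (probe_kernel_read(cur, (void *)rec->ip, MCOUNT_INSN_SIZE))
 *              return -EFAULT;         // could not read the location
 *      if (memcmp(cur, expected, MCOUNT_INSN_SIZE) != 0)
 *              return -EINVAL;         // contents are not what we expect
 *      if (probe_kernel_write((void *)rec->ip, replacement, MCOUNT_INSN_SIZE))
 *              return -EPERM;          // could not write the location
 *      return 0;
 */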
#ifdef ARCH_SUPPORTS_FTRACE_SAVE_REGS
/**
 * ftrace_modify_call - convert from one addr to another (no nop)
 * @rec: the mcount call site record
 * @old_addr: the address expected to be currently called to
 * @addr: the address to change to
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @old_addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
                              unsigned long addr);
#else
/* Should never be called */
static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
                                     unsigned long addr)
{
        return -EINVAL;
}
#endif

/* May be defined in arch */
extern int ftrace_arch_read_dyn_info(char *buf, int size);

extern int skip_trace(unsigned long ip);

extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void);
#else
static inline int skip_trace(unsigned long ip) { return 0; }
static inline int ftrace_force_update(void) { return 0; }
static inline void ftrace_disable_daemon(void) { }
static inline void ftrace_enable_daemon(void) { }
static inline void ftrace_release_mod(struct module *mod) { }
static inline int register_ftrace_command(struct ftrace_func_command *cmd)
{
        return -EINVAL;
}
static inline int unregister_ftrace_command(char *cmd_name)
{
        return -EINVAL;
}
static inline int ftrace_text_reserved(void *start, void *end)
{
        return 0;
}

/*
 * Again, users of functions that take ftrace_ops may not
 * have them defined when ftrace is not enabled, but these
 * functions may still be called. Use a macro instead of inline.
 */
#define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
#define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_free_filter(ops) do { } while (0)

static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
                                          size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
                                           size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline loff_t ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
        return -ENODEV;
}
static inline int
ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }
#endif /* CONFIG_DYNAMIC_FTRACE */

/* totally disable ftrace - cannot re-enable after this */
void ftrace_kill(void);

static inline void tracer_disable(void)
{
#ifdef CONFIG_FUNCTION_TRACER
        ftrace_enabled = 0;
#endif
}

/*
 * Ftrace disable/restore without lock. Some synchronization mechanism
 * must be used to prevent ftrace_enabled from being changed between
 * disable/restore.
 */
static inline int __ftrace_enabled_save(void)
{
#ifdef CONFIG_FUNCTION_TRACER
        int saved_ftrace_enabled = ftrace_enabled;
        ftrace_enabled = 0;
        return saved_ftrace_enabled;
#else
        return 0;
#endif
}

static inline void __ftrace_enabled_restore(int enabled)
{
#ifdef CONFIG_FUNCTION_TRACER
        ftrace_enabled = enabled;
#endif
}
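/*
 * Typical (illustrative) use of the pair above, in a caller that already
 * provides its own serialization around the critical section:
 *
 *      int saved;
 *
 *      saved = __ftrace_enabled_save();        // force ftrace_enabled off
 *      ...                                     // work that must not be traced
 *      __ftrace_enabled_restore(saved);        // put the old value back
 */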
#ifndef HAVE_ARCH_CALLER_ADDR
# ifdef CONFIG_FRAME_POINTER
#  define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
#  define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1))
#  define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2))
#  define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3))
#  define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4))
#  define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5))
#  define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6))
# else
#  define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
#  define CALLER_ADDR1 0UL
#  define CALLER_ADDR2 0UL
#  define CALLER_ADDR3 0UL
#  define CALLER_ADDR4 0UL
#  define CALLER_ADDR5 0UL
#  define CALLER_ADDR6 0UL
# endif
#endif /* ifndef HAVE_ARCH_CALLER_ADDR */
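/*
 * For example (illustrative only), the latency-tracing hooks declared below
 * are normally passed caller addresses from the site that toggled the state:
 *
 *      time_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
 */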
#ifdef CONFIG_IRQSOFF_TRACER
extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
#else
static inline void time_hardirqs_on(unsigned long a0, unsigned long a1) { }
static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
#endif

#ifdef CONFIG_PREEMPT_TRACER
extern void trace_preempt_on(unsigned long a0, unsigned long a1);
extern void trace_preempt_off(unsigned long a0, unsigned long a1);
#else
/*
 * Use defines instead of static inlines because some arches will make code out
 * of the CALLER_ADDR, when we really want these to be a real nop.
 */
# define trace_preempt_on(a0, a1) do { } while (0)
# define trace_preempt_off(a0, a1) do { } while (0)
#endif

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
extern void ftrace_init(void);
#else
static inline void ftrace_init(void) { }
#endif

/*
 * Structure that defines an entry function trace.
 */
struct ftrace_graph_ent {
        unsigned long func; /* Current function */
        int depth;
};

/*
 * Structure that defines a return function trace.
 */
struct ftrace_graph_ret {
        unsigned long func; /* Current function */
        unsigned long long calltime;
        unsigned long long rettime;
        /* Number of functions that overran the depth limit for current task */
        unsigned long overrun;
        int depth;
};

/* Type of the callback handlers for tracing function graph */
typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* for init task */
#define INIT_FTRACE_GRAPH               .ret_stack = NULL,

/*
 * Stack of return addresses for functions
 * of a thread.
 * Used in struct thread_info
 */
struct ftrace_ret_stack {
        unsigned long ret;
        unsigned long func;
        unsigned long long calltime;
        unsigned long long subtime;
        unsigned long fp;
};

/*
 * Primary handler of a function return.
 * It relies on ftrace_return_to_handler.
 * Defined in entry_32/64.S
 */
extern void return_to_handler(void);

extern int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
                         unsigned long frame_pointer);

/*
 * Sometimes we don't want to trace a function with the function
 * graph tracer, but we still want it to be traced by the usual function
 * tracer if the function graph tracer is not configured.
 */
#define __notrace_funcgraph             notrace

/*
 * We want to know which function is an entry point of a hardirq.
 * That will help us to put a signal on output.
 */
#define __irq_entry __attribute__((__section__(".irqentry.text")))

/* Limits of hardirq entrypoints */
extern char __irqentry_text_start[];
extern char __irqentry_text_end[];

#define FTRACE_RETFUNC_DEPTH 50
#define FTRACE_RETSTACK_ALLOC_SIZE 32
extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
                                 trace_func_graph_ent_t entryfunc);

extern void ftrace_graph_stop(void);

/* The current handlers in use */
extern trace_func_graph_ret_t ftrace_graph_return;
extern trace_func_graph_ent_t ftrace_graph_entry;

extern void unregister_ftrace_graph(void);

extern void ftrace_graph_init_task(struct task_struct *t);
extern void ftrace_graph_exit_task(struct task_struct *t);
extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);

static inline int task_curr_ret_stack(struct task_struct *t)
{
        return t->curr_ret_stack;
}

static inline void pause_graph_tracing(void)
{
        atomic_inc(&current->tracing_graph_pause);
}

static inline void unpause_graph_tracing(void)
{
        atomic_dec(&current->tracing_graph_pause);
}
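/*
 * Illustrative sketch only: the graph tracer takes one return handler and
 * one entry handler, matching the two typedefs above. The handler names
 * below are hypothetical.
 *
 *      static int my_graph_entry(struct ftrace_graph_ent *trace)
 *      {
 *              return 1;       // non-zero: do trace this function
 *      }
 *
 *      static void my_graph_return(struct ftrace_graph_ret *trace)
 *      {
 *              // trace->rettime - trace->calltime is the time spent
 *      }
 *
 * The pair is installed with register_ftrace_graph(my_graph_return,
 * my_graph_entry) and removed again with unregister_ftrace_graph().
 */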
#else /* !CONFIG_FUNCTION_GRAPH_TRACER */

#define __notrace_funcgraph
#define __irq_entry
#define INIT_FTRACE_GRAPH

static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }

static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc,
                                        trace_func_graph_ent_t entryfunc)
{
        return -1;
}
static inline void unregister_ftrace_graph(void) { }

static inline int task_curr_ret_stack(struct task_struct *tsk)
{
        return -1;
}

static inline void pause_graph_tracing(void) { }
static inline void unpause_graph_tracing(void) { }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_TRACING

/* flags for current->trace */
enum {
        TSK_TRACE_FL_TRACE_BIT  = 0,
        TSK_TRACE_FL_GRAPH_BIT  = 1,
};
enum {
        TSK_TRACE_FL_TRACE      = 1 << TSK_TRACE_FL_TRACE_BIT,
        TSK_TRACE_FL_GRAPH      = 1 << TSK_TRACE_FL_GRAPH_BIT,
};

static inline void set_tsk_trace_trace(struct task_struct *tsk)
{
        set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_trace(struct task_struct *tsk)
{
        clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline int test_tsk_trace_trace(struct task_struct *tsk)
{
        return tsk->trace & TSK_TRACE_FL_TRACE;
}

static inline void set_tsk_trace_graph(struct task_struct *tsk)
{
        set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_graph(struct task_struct *tsk)
{
        clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline int test_tsk_trace_graph(struct task_struct *tsk)
{
        return tsk->trace & TSK_TRACE_FL_GRAPH;
}

enum ftrace_dump_mode;

extern enum ftrace_dump_mode ftrace_dump_on_oops;

#ifdef CONFIG_PREEMPT
#define INIT_TRACE_RECURSION            .trace_recursion = 0,
#endif

#endif /* CONFIG_TRACING */

#ifndef INIT_TRACE_RECURSION
#define INIT_TRACE_RECURSION
#endif

#ifdef CONFIG_FTRACE_SYSCALLS

unsigned long arch_syscall_addr(int nr);

#endif /* CONFIG_FTRACE_SYSCALLS */

#endif /* _LINUX_FTRACE_H */