/* ftrace.h */
  1. /*
  2. * Ftrace header. For implementation details beyond the random comments
  3. * scattered below, see: Documentation/trace/ftrace-design.txt
  4. */
  5. #ifndef _LINUX_FTRACE_H
  6. #define _LINUX_FTRACE_H
  7. #include <linux/trace_clock.h>
  8. #include <linux/kallsyms.h>
  9. #include <linux/linkage.h>
  10. #include <linux/bitops.h>
  11. #include <linux/ktime.h>
  12. #include <linux/sched.h>
  13. #include <linux/types.h>
  14. #include <linux/init.h>
  15. #include <linux/fs.h>
  16. #include <asm/ftrace.h>
  17. /*
  18. * If the arch supports passing the variable contents of
  19. * function_trace_op as the third parameter back from the
  20. * mcount call, then the arch should define this as 1.
  21. */
  22. #ifndef ARCH_SUPPORTS_FTRACE_OPS
  23. #define ARCH_SUPPORTS_FTRACE_OPS 0
  24. #endif
  25. struct module;
  26. struct ftrace_hash;
  27. #ifdef CONFIG_FUNCTION_TRACER
  28. extern int ftrace_enabled;
  29. extern int
  30. ftrace_enable_sysctl(struct ctl_table *table, int write,
  31. void __user *buffer, size_t *lenp,
  32. loff_t *ppos);
  33. struct ftrace_ops;
  34. typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
  35. struct ftrace_ops *op);
  36. /*
  37. * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
  38. * set in the flags member.
  39. *
  40. * ENABLED - set/unset when ftrace_ops is registered/unregistered
  41. * GLOBAL - set manualy by ftrace_ops user to denote the ftrace_ops
  42. * is part of the global tracers sharing the same filter
  43. * via set_ftrace_* debugfs files.
  44. * DYNAMIC - set when ftrace_ops is registered to denote dynamically
  45. * allocated ftrace_ops which need special care
  46. * CONTROL - set manualy by ftrace_ops user to denote the ftrace_ops
  47. * could be controled by following calls:
  48. * ftrace_function_local_enable
  49. * ftrace_function_local_disable
  50. */
  51. enum {
  52. FTRACE_OPS_FL_ENABLED = 1 << 0,
  53. FTRACE_OPS_FL_GLOBAL = 1 << 1,
  54. FTRACE_OPS_FL_DYNAMIC = 1 << 2,
  55. FTRACE_OPS_FL_CONTROL = 1 << 3,
  56. };
  57. struct ftrace_ops {
  58. ftrace_func_t func;
  59. struct ftrace_ops *next;
  60. unsigned long flags;
  61. int __percpu *disabled;
  62. #ifdef CONFIG_DYNAMIC_FTRACE
  63. struct ftrace_hash *notrace_hash;
  64. struct ftrace_hash *filter_hash;
  65. #endif
  66. };
  67. extern int function_trace_stop;
  68. /*
  69. * Type of the current tracing.
  70. */
  71. enum ftrace_tracing_type_t {
  72. FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */
  73. FTRACE_TYPE_RETURN, /* Hook the return of the function */
  74. };
  75. /* Current tracing type, default is FTRACE_TYPE_ENTER */
  76. extern enum ftrace_tracing_type_t ftrace_tracing_type;
  77. /**
  78. * ftrace_stop - stop function tracer.
  79. *
  80. * A quick way to stop the function tracer. Note this an on off switch,
  81. * it is not something that is recursive like preempt_disable.
  82. * This does not disable the calling of mcount, it only stops the
  83. * calling of functions from mcount.
  84. */
  85. static inline void ftrace_stop(void)
  86. {
  87. function_trace_stop = 1;
  88. }
  89. /**
  90. * ftrace_start - start the function tracer.
  91. *
  92. * This function is the inverse of ftrace_stop. This does not enable
  93. * the function tracing if the function tracer is disabled. This only
  94. * sets the function tracer flag to continue calling the functions
  95. * from mcount.
  96. */
  97. static inline void ftrace_start(void)
  98. {
  99. function_trace_stop = 0;
  100. }
  101. /*
  102. * The ftrace_ops must be a static and should also
  103. * be read_mostly. These functions do modify read_mostly variables
  104. * so use them sparely. Never free an ftrace_op or modify the
  105. * next pointer after it has been registered. Even after unregistering
  106. * it, the next pointer may still be used internally.
  107. */
  108. int register_ftrace_function(struct ftrace_ops *ops);
  109. int unregister_ftrace_function(struct ftrace_ops *ops);
  110. void clear_ftrace_function(void);
  111. /**
  112. * ftrace_function_local_enable - enable controlled ftrace_ops on current cpu
  113. *
  114. * This function enables tracing on current cpu by decreasing
  115. * the per cpu control variable.
  116. * It must be called with preemption disabled and only on ftrace_ops
  117. * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
  118. * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
  119. */
  120. static inline void ftrace_function_local_enable(struct ftrace_ops *ops)
  121. {
  122. if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))
  123. return;
  124. (*this_cpu_ptr(ops->disabled))--;
  125. }
  126. /**
  127. * ftrace_function_local_disable - enable controlled ftrace_ops on current cpu
  128. *
  129. * This function enables tracing on current cpu by decreasing
  130. * the per cpu control variable.
  131. * It must be called with preemption disabled and only on ftrace_ops
  132. * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
  133. * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
  134. */
  135. static inline void ftrace_function_local_disable(struct ftrace_ops *ops)
  136. {
  137. if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))
  138. return;
  139. (*this_cpu_ptr(ops->disabled))++;
  140. }
  141. /**
  142. * ftrace_function_local_disabled - returns ftrace_ops disabled value
  143. * on current cpu
  144. *
  145. * This function returns value of ftrace_ops::disabled on current cpu.
  146. * It must be called with preemption disabled and only on ftrace_ops
  147. * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
  148. * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
  149. */
  150. static inline int ftrace_function_local_disabled(struct ftrace_ops *ops)
  151. {
  152. WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL));
  153. return *this_cpu_ptr(ops->disabled);
  154. }
  155. extern void ftrace_stub(unsigned long a0, unsigned long a1, struct ftrace_ops *op);
  156. #else /* !CONFIG_FUNCTION_TRACER */
  157. /*
  158. * (un)register_ftrace_function must be a macro since the ops parameter
  159. * must not be evaluated.
  160. */
  161. #define register_ftrace_function(ops) ({ 0; })
  162. #define unregister_ftrace_function(ops) ({ 0; })
  163. static inline void clear_ftrace_function(void) { }
  164. static inline void ftrace_kill(void) { }
  165. static inline void ftrace_stop(void) { }
  166. static inline void ftrace_start(void) { }
  167. #endif /* CONFIG_FUNCTION_TRACER */
  168. #ifdef CONFIG_STACK_TRACER
  169. extern int stack_tracer_enabled;
  170. int
  171. stack_trace_sysctl(struct ctl_table *table, int write,
  172. void __user *buffer, size_t *lenp,
  173. loff_t *ppos);
  174. #endif
  175. struct ftrace_func_command {
  176. struct list_head list;
  177. char *name;
  178. int (*func)(struct ftrace_hash *hash,
  179. char *func, char *cmd,
  180. char *params, int enable);
  181. };
  182. #ifdef CONFIG_DYNAMIC_FTRACE
  183. int ftrace_arch_code_modify_prepare(void);
  184. int ftrace_arch_code_modify_post_process(void);
  185. void ftrace_bug(int err, unsigned long ip);
  186. struct seq_file;
  187. struct ftrace_probe_ops {
  188. void (*func)(unsigned long ip,
  189. unsigned long parent_ip,
  190. void **data);
  191. int (*callback)(unsigned long ip, void **data);
  192. void (*free)(void **data);
  193. int (*print)(struct seq_file *m,
  194. unsigned long ip,
  195. struct ftrace_probe_ops *ops,
  196. void *data);
  197. };
  198. extern int
  199. register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
  200. void *data);
  201. extern void
  202. unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
  203. void *data);
  204. extern void
  205. unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops);
  206. extern void unregister_ftrace_function_probe_all(char *glob);
  207. extern int ftrace_text_reserved(void *start, void *end);
  208. enum {
  209. FTRACE_FL_ENABLED = (1 << 30),
  210. };
  211. #define FTRACE_FL_MASK (0x3UL << 30)
  212. #define FTRACE_REF_MAX ((1 << 30) - 1)
  213. struct dyn_ftrace {
  214. union {
  215. unsigned long ip; /* address of mcount call-site */
  216. struct dyn_ftrace *freelist;
  217. };
  218. unsigned long flags;
  219. struct dyn_arch_ftrace arch;
  220. };
  221. int ftrace_force_update(void);
  222. int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
  223. int len, int reset);
  224. int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
  225. int len, int reset);
  226. void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
  227. void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
  228. void ftrace_free_filter(struct ftrace_ops *ops);
  229. int register_ftrace_command(struct ftrace_func_command *cmd);
  230. int unregister_ftrace_command(struct ftrace_func_command *cmd);
  231. enum {
  232. FTRACE_UPDATE_CALLS = (1 << 0),
  233. FTRACE_DISABLE_CALLS = (1 << 1),
  234. FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
  235. FTRACE_START_FUNC_RET = (1 << 3),
  236. FTRACE_STOP_FUNC_RET = (1 << 4),
  237. };
  238. enum {
  239. FTRACE_UPDATE_IGNORE,
  240. FTRACE_UPDATE_MAKE_CALL,
  241. FTRACE_UPDATE_MAKE_NOP,
  242. };
  243. enum {
  244. FTRACE_ITER_FILTER = (1 << 0),
  245. FTRACE_ITER_NOTRACE = (1 << 1),
  246. FTRACE_ITER_PRINTALL = (1 << 2),
  247. FTRACE_ITER_DO_HASH = (1 << 3),
  248. FTRACE_ITER_HASH = (1 << 4),
  249. FTRACE_ITER_ENABLED = (1 << 5),
  250. };
  251. void arch_ftrace_update_code(int command);
  252. struct ftrace_rec_iter;
  253. struct ftrace_rec_iter *ftrace_rec_iter_start(void);
  254. struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter);
  255. struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);
  256. #define for_ftrace_rec_iter(iter) \
  257. for (iter = ftrace_rec_iter_start(); \
  258. iter; \
  259. iter = ftrace_rec_iter_next(iter))
  260. int ftrace_update_record(struct dyn_ftrace *rec, int enable);
  261. int ftrace_test_record(struct dyn_ftrace *rec, int enable);
  262. void ftrace_run_stop_machine(int command);
  263. unsigned long ftrace_location(unsigned long ip);
  264. extern ftrace_func_t ftrace_trace_function;
  265. int ftrace_regex_open(struct ftrace_ops *ops, int flag,
  266. struct inode *inode, struct file *file);
  267. ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
  268. size_t cnt, loff_t *ppos);
  269. ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
  270. size_t cnt, loff_t *ppos);
  271. loff_t ftrace_regex_lseek(struct file *file, loff_t offset, int origin);
  272. int ftrace_regex_release(struct inode *inode, struct file *file);
  273. void __init
  274. ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);
  275. /* defined in arch */
  276. extern int ftrace_ip_converted(unsigned long ip);
  277. extern int ftrace_dyn_arch_init(void *data);
  278. extern void ftrace_replace_code(int enable);
  279. extern int ftrace_update_ftrace_func(ftrace_func_t func);
  280. extern void ftrace_caller(void);
  281. extern void ftrace_call(void);
  282. extern void mcount_call(void);
  283. void ftrace_modify_all_code(int command);
  284. #ifndef FTRACE_ADDR
  285. #define FTRACE_ADDR ((unsigned long)ftrace_caller)
  286. #endif
  287. #ifdef CONFIG_FUNCTION_GRAPH_TRACER
  288. extern void ftrace_graph_caller(void);
  289. extern int ftrace_enable_ftrace_graph_caller(void);
  290. extern int ftrace_disable_ftrace_graph_caller(void);
  291. #else
  292. static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
  293. static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
  294. #endif
  295. /**
  296. * ftrace_make_nop - convert code into nop
  297. * @mod: module structure if called by module load initialization
  298. * @rec: the mcount call site record
  299. * @addr: the address that the call site should be calling
  300. *
  301. * This is a very sensitive operation and great care needs
  302. * to be taken by the arch. The operation should carefully
  303. * read the location, check to see if what is read is indeed
  304. * what we expect it to be, and then on success of the compare,
  305. * it should write to the location.
  306. *
  307. * The code segment at @rec->ip should be a caller to @addr
  308. *
  309. * Return must be:
  310. * 0 on success
  311. * -EFAULT on error reading the location
  312. * -EINVAL on a failed compare of the contents
  313. * -EPERM on error writing to the location
  314. * Any other value will be considered a failure.
  315. */
  316. extern int ftrace_make_nop(struct module *mod,
  317. struct dyn_ftrace *rec, unsigned long addr);
  318. /**
  319. * ftrace_make_call - convert a nop call site into a call to addr
  320. * @rec: the mcount call site record
  321. * @addr: the address that the call site should call
  322. *
  323. * This is a very sensitive operation and great care needs
  324. * to be taken by the arch. The operation should carefully
  325. * read the location, check to see if what is read is indeed
  326. * what we expect it to be, and then on success of the compare,
  327. * it should write to the location.
  328. *
  329. * The code segment at @rec->ip should be a nop
  330. *
  331. * Return must be:
  332. * 0 on success
  333. * -EFAULT on error reading the location
  334. * -EINVAL on a failed compare of the contents
  335. * -EPERM on error writing to the location
  336. * Any other value will be considered a failure.
  337. */
  338. extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
  339. /* May be defined in arch */
  340. extern int ftrace_arch_read_dyn_info(char *buf, int size);
  341. extern int skip_trace(unsigned long ip);
  342. extern void ftrace_disable_daemon(void);
  343. extern void ftrace_enable_daemon(void);
  344. #else
  345. static inline int skip_trace(unsigned long ip) { return 0; }
  346. static inline int ftrace_force_update(void) { return 0; }
  347. static inline void ftrace_disable_daemon(void) { }
  348. static inline void ftrace_enable_daemon(void) { }
  349. static inline void ftrace_release_mod(struct module *mod) {}
  350. static inline int register_ftrace_command(struct ftrace_func_command *cmd)
  351. {
  352. return -EINVAL;
  353. }
  354. static inline int unregister_ftrace_command(char *cmd_name)
  355. {
  356. return -EINVAL;
  357. }
  358. static inline int ftrace_text_reserved(void *start, void *end)
  359. {
  360. return 0;
  361. }
  362. /*
  363. * Again users of functions that have ftrace_ops may not
  364. * have them defined when ftrace is not enabled, but these
  365. * functions may still be called. Use a macro instead of inline.
  366. */
  367. #define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
  368. #define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
  369. #define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
  370. #define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
  371. #define ftrace_free_filter(ops) do { } while (0)
  372. static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
  373. size_t cnt, loff_t *ppos) { return -ENODEV; }
  374. static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
  375. size_t cnt, loff_t *ppos) { return -ENODEV; }
  376. static inline loff_t ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
  377. {
  378. return -ENODEV;
  379. }
  380. static inline int
  381. ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }
  382. #endif /* CONFIG_DYNAMIC_FTRACE */
  383. /* totally disable ftrace - can not re-enable after this */
  384. void ftrace_kill(void);
  385. static inline void tracer_disable(void)
  386. {
  387. #ifdef CONFIG_FUNCTION_TRACER
  388. ftrace_enabled = 0;
  389. #endif
  390. }
  391. /*
  392. * Ftrace disable/restore without lock. Some synchronization mechanism
  393. * must be used to prevent ftrace_enabled to be changed between
  394. * disable/restore.
  395. */
  396. static inline int __ftrace_enabled_save(void)
  397. {
  398. #ifdef CONFIG_FUNCTION_TRACER
  399. int saved_ftrace_enabled = ftrace_enabled;
  400. ftrace_enabled = 0;
  401. return saved_ftrace_enabled;
  402. #else
  403. return 0;
  404. #endif
  405. }
  406. static inline void __ftrace_enabled_restore(int enabled)
  407. {
  408. #ifdef CONFIG_FUNCTION_TRACER
  409. ftrace_enabled = enabled;
  410. #endif
  411. }
  412. #ifndef HAVE_ARCH_CALLER_ADDR
  413. # ifdef CONFIG_FRAME_POINTER
  414. # define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
  415. # define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1))
  416. # define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2))
  417. # define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3))
  418. # define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4))
  419. # define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5))
  420. # define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6))
  421. # else
  422. # define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
  423. # define CALLER_ADDR1 0UL
  424. # define CALLER_ADDR2 0UL
  425. # define CALLER_ADDR3 0UL
  426. # define CALLER_ADDR4 0UL
  427. # define CALLER_ADDR5 0UL
  428. # define CALLER_ADDR6 0UL
  429. # endif
  430. #endif /* ifndef HAVE_ARCH_CALLER_ADDR */
  431. #ifdef CONFIG_IRQSOFF_TRACER
  432. extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
  433. extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
  434. #else
  435. static inline void time_hardirqs_on(unsigned long a0, unsigned long a1) { }
  436. static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
  437. #endif
  438. #ifdef CONFIG_PREEMPT_TRACER
  439. extern void trace_preempt_on(unsigned long a0, unsigned long a1);
  440. extern void trace_preempt_off(unsigned long a0, unsigned long a1);
  441. #else
  442. /*
  443. * Use defines instead of static inlines because some arches will make code out
  444. * of the CALLER_ADDR, when we really want these to be a real nop.
  445. */
  446. # define trace_preempt_on(a0, a1) do { } while (0)
  447. # define trace_preempt_off(a0, a1) do { } while (0)
  448. #endif
  449. #ifdef CONFIG_FTRACE_MCOUNT_RECORD
  450. extern void ftrace_init(void);
  451. #else
  452. static inline void ftrace_init(void) { }
  453. #endif
  454. /*
  455. * Structure that defines an entry function trace.
  456. */
  457. struct ftrace_graph_ent {
  458. unsigned long func; /* Current function */
  459. int depth;
  460. };
  461. /*
  462. * Structure that defines a return function trace.
  463. */
  464. struct ftrace_graph_ret {
  465. unsigned long func; /* Current function */
  466. unsigned long long calltime;
  467. unsigned long long rettime;
  468. /* Number of functions that overran the depth limit for current task */
  469. unsigned long overrun;
  470. int depth;
  471. };
  472. /* Type of the callback handlers for tracing function graph*/
  473. typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
  474. typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
  475. #ifdef CONFIG_FUNCTION_GRAPH_TRACER
  476. /* for init task */
  477. #define INIT_FTRACE_GRAPH .ret_stack = NULL,
  478. /*
  479. * Stack of return addresses for functions
  480. * of a thread.
  481. * Used in struct thread_info
  482. */
  483. struct ftrace_ret_stack {
  484. unsigned long ret;
  485. unsigned long func;
  486. unsigned long long calltime;
  487. unsigned long long subtime;
  488. unsigned long fp;
  489. };
  490. /*
  491. * Primary handler of a function return.
  492. * It relays on ftrace_return_to_handler.
  493. * Defined in entry_32/64.S
  494. */
  495. extern void return_to_handler(void);
  496. extern int
  497. ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
  498. unsigned long frame_pointer);
  499. /*
  500. * Sometimes we don't want to trace a function with the function
  501. * graph tracer but we want them to keep traced by the usual function
  502. * tracer if the function graph tracer is not configured.
  503. */
  504. #define __notrace_funcgraph notrace
  505. /*
  506. * We want to which function is an entrypoint of a hardirq.
  507. * That will help us to put a signal on output.
  508. */
  509. #define __irq_entry __attribute__((__section__(".irqentry.text")))
  510. /* Limits of hardirq entrypoints */
  511. extern char __irqentry_text_start[];
  512. extern char __irqentry_text_end[];
  513. #define FTRACE_RETFUNC_DEPTH 50
  514. #define FTRACE_RETSTACK_ALLOC_SIZE 32
  515. extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
  516. trace_func_graph_ent_t entryfunc);
  517. extern void ftrace_graph_stop(void);
  518. /* The current handlers in use */
  519. extern trace_func_graph_ret_t ftrace_graph_return;
  520. extern trace_func_graph_ent_t ftrace_graph_entry;
  521. extern void unregister_ftrace_graph(void);
  522. extern void ftrace_graph_init_task(struct task_struct *t);
  523. extern void ftrace_graph_exit_task(struct task_struct *t);
  524. extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);
  525. static inline int task_curr_ret_stack(struct task_struct *t)
  526. {
  527. return t->curr_ret_stack;
  528. }
  529. static inline void pause_graph_tracing(void)
  530. {
  531. atomic_inc(&current->tracing_graph_pause);
  532. }
  533. static inline void unpause_graph_tracing(void)
  534. {
  535. atomic_dec(&current->tracing_graph_pause);
  536. }
  537. #else /* !CONFIG_FUNCTION_GRAPH_TRACER */
  538. #define __notrace_funcgraph
  539. #define __irq_entry
  540. #define INIT_FTRACE_GRAPH
  541. static inline void ftrace_graph_init_task(struct task_struct *t) { }
  542. static inline void ftrace_graph_exit_task(struct task_struct *t) { }
  543. static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }
  544. static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc,
  545. trace_func_graph_ent_t entryfunc)
  546. {
  547. return -1;
  548. }
  549. static inline void unregister_ftrace_graph(void) { }
  550. static inline int task_curr_ret_stack(struct task_struct *tsk)
  551. {
  552. return -1;
  553. }
  554. static inline void pause_graph_tracing(void) { }
  555. static inline void unpause_graph_tracing(void) { }
  556. #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
  557. #ifdef CONFIG_TRACING
  558. /* flags for current->trace */
  559. enum {
  560. TSK_TRACE_FL_TRACE_BIT = 0,
  561. TSK_TRACE_FL_GRAPH_BIT = 1,
  562. };
  563. enum {
  564. TSK_TRACE_FL_TRACE = 1 << TSK_TRACE_FL_TRACE_BIT,
  565. TSK_TRACE_FL_GRAPH = 1 << TSK_TRACE_FL_GRAPH_BIT,
  566. };
  567. static inline void set_tsk_trace_trace(struct task_struct *tsk)
  568. {
  569. set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
  570. }
  571. static inline void clear_tsk_trace_trace(struct task_struct *tsk)
  572. {
  573. clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
  574. }
  575. static inline int test_tsk_trace_trace(struct task_struct *tsk)
  576. {
  577. return tsk->trace & TSK_TRACE_FL_TRACE;
  578. }
  579. static inline void set_tsk_trace_graph(struct task_struct *tsk)
  580. {
  581. set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
  582. }
  583. static inline void clear_tsk_trace_graph(struct task_struct *tsk)
  584. {
  585. clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
  586. }
  587. static inline int test_tsk_trace_graph(struct task_struct *tsk)
  588. {
  589. return tsk->trace & TSK_TRACE_FL_GRAPH;
  590. }
  591. enum ftrace_dump_mode;
  592. extern enum ftrace_dump_mode ftrace_dump_on_oops;
  593. #ifdef CONFIG_PREEMPT
  594. #define INIT_TRACE_RECURSION .trace_recursion = 0,
  595. #endif
  596. #endif /* CONFIG_TRACING */
  597. #ifndef INIT_TRACE_RECURSION
  598. #define INIT_TRACE_RECURSION
  599. #endif
  600. #ifdef CONFIG_FTRACE_SYSCALLS
  601. unsigned long arch_syscall_addr(int nr);
  602. #endif /* CONFIG_FTRACE_SYSCALLS */
  603. #endif /* _LINUX_FTRACE_H */