
/*
 * Tracing hooks
 *
 * Copyright (C) 2008-2009 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 *
 * This file defines hook entry points called by core code where
 * user tracing/debugging support might need to do something.  These
 * entry points are called tracehook_*().  Each hook declared below
 * has a detailed kerneldoc comment giving the context (locking et
 * al) from which it is called, and the meaning of its return value.
 *
 * Each function here typically has only one call site, so it is ok
 * to have some nontrivial tracehook_*() inlines.  In all cases, the
 * fast path when no tracing is enabled should be very short.
 *
 * The purpose of this file and the tracehook_* layer is to consolidate
 * the interface that the kernel core and arch code use to enable any
 * user debugging or tracing facility (such as ptrace).  The interfaces
 * here are carefully documented so that maintainers of core and arch
 * code do not need to think about the implementation details of the
 * tracing facilities.  Likewise, maintainers of the tracing code do not
 * need to understand all the calling core or arch code in detail, just
 * the documented circumstances of each call, such as locking conditions.
 *
 * If the calling core code changes so that locking is different, then
 * it is ok to change the interface documented here.  The maintainer of
 * the core code being changed should notify the maintainers of the
 * tracing code so that they can work out the change together.
 *
 * Some tracehook_*() inlines take arguments that the current tracing
 * implementations might not necessarily use.  These function signatures
 * are chosen to pass in all the information that is on hand in the
 * caller and might conceivably be relevant to a tracer, so that the
 * core code won't have to be updated when tracing adds more features.
 * If a call site changes so that some of those parameters are no longer
 * already on hand without extra work, then the tracehook_* interface
 * can change so there is no make-work burden on the core code.  Again,
 * the maintainer of the core code being changed should notify the
 * maintainers of the tracing code so that they can work out the change.
 */

#ifndef _LINUX_TRACEHOOK_H
#define _LINUX_TRACEHOOK_H	1

#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/security.h>

struct linux_binprm;

/**
 * tracehook_expect_breakpoints - guess if task memory might be touched
 * @task: current task, making a new mapping
 *
 * Return nonzero if @task is expected to want breakpoint insertion in
 * its memory at some point.  A zero return is no guarantee it won't
 * be done, but this is a hint that it's known to be likely.
 *
 * May be called with @task->mm->mmap_sem held for writing.
 */
static inline int tracehook_expect_breakpoints(struct task_struct *task)
{
        return (task_ptrace(task) & PT_PTRACED) != 0;
}

/*
 * ptrace report for syscall entry and exit looks identical.
 */
static inline void ptrace_report_syscall(struct pt_regs *regs)
{
        int ptrace = task_ptrace(current);

        if (!(ptrace & PT_PTRACED))
                return;

        ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));

        /*
         * This isn't the same as continuing with a signal, but it will do
         * for normal use.  strace only continues with a signal if the
         * stopping signal is not SIGTRAP.  -brl
         */
        if (current->exit_code) {
                send_sig(current->exit_code, current, 1);
                current->exit_code = 0;
        }
}

/**
 * tracehook_report_syscall_entry - task is about to attempt a system call
 * @regs: user register state of current task
 *
 * This will be called if %TIF_SYSCALL_TRACE has been set, when the
 * current task has just entered the kernel for a system call.
 * Full user register state is available here.  Changing the values
 * in @regs can affect the system call number and arguments to be tried.
 * It is safe to block here, preventing the system call from beginning.
 *
 * Returns zero normally, or nonzero if the calling arch code should abort
 * the system call.  That must prevent normal entry so no system call is
 * made.  If the task ever returns to user mode after this, its register
 * state is unspecified, but should be something harmless like an %ENOSYS
 * error return.  It should preserve enough information so that
 * syscall_rollback() can work (see asm-generic/syscall.h).
 *
 * Called without locks, just after entering kernel mode.
 */
static inline __must_check int tracehook_report_syscall_entry(
        struct pt_regs *regs)
{
        ptrace_report_syscall(regs);
        return 0;
}
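
/*
 * Example (illustrative sketch only, not part of this interface): arch
 * syscall-entry code typically invokes this hook from its TIF_SYSCALL_TRACE
 * slow path.  The function name do_syscall_trace_enter() and its exact
 * shape are assumptions for illustration.
 *
 *      long do_syscall_trace_enter(struct pt_regs *regs)
 *      {
 *              long ret = 0;
 *
 *              if (test_thread_flag(TIF_SYSCALL_TRACE) &&
 *                  tracehook_report_syscall_entry(regs))
 *                      ret = -1;       // nonzero: arch code skips the syscall
 *
 *              return ret;
 *      }
 */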

/**
 * tracehook_report_syscall_exit - task has just finished a system call
 * @regs: user register state of current task
 * @step: nonzero if simulating single-step or block-step
 *
 * This will be called if %TIF_SYSCALL_TRACE has been set, when the
 * current task has just finished an attempted system call.  Full
 * user register state is available here.  It is safe to block here,
 * preventing signals from being processed.
 *
 * If @step is nonzero, this report is also in lieu of the normal
 * trap that would follow the system call instruction because
 * user_enable_block_step() or user_enable_single_step() was used.
 * In this case, %TIF_SYSCALL_TRACE might not be set.
 *
 * Called without locks, just before checking for pending signals.
 */
static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step)
{
        if (step) {
                siginfo_t info;
                user_single_step_siginfo(current, regs, &info);
                force_sig_info(SIGTRAP, &info, current);
                return;
        }

        ptrace_report_syscall(regs);
}
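
/*
 * Example (illustrative sketch only): arch syscall-exit code usually pairs
 * the call with its single-step bookkeeping.  do_syscall_trace_leave() is a
 * hypothetical name used here for illustration.
 *
 *      void do_syscall_trace_leave(struct pt_regs *regs)
 *      {
 *              int step = test_thread_flag(TIF_SINGLESTEP);
 *
 *              if (step || test_thread_flag(TIF_SYSCALL_TRACE))
 *                      tracehook_report_syscall_exit(regs, step);
 *      }
 */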

/**
 * tracehook_unsafe_exec - check for exec declared unsafe due to tracing
 * @task: current task doing exec
 *
 * Return %LSM_UNSAFE_* bits applied to an exec because of tracing.
 *
 * @task->signal->cred_guard_mutex is held by the caller throughout do_execve().
 */
static inline int tracehook_unsafe_exec(struct task_struct *task)
{
        int unsafe = 0;
        int ptrace = task_ptrace(task);

        if (ptrace & PT_PTRACED) {
                if (ptrace & PT_PTRACE_CAP)
                        unsafe |= LSM_UNSAFE_PTRACE_CAP;
                else
                        unsafe |= LSM_UNSAFE_PTRACE;
        }
        return unsafe;
}
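
/*
 * Example (illustrative sketch only): exec/LSM code collects these bits into
 * the binprm so credential setup can account for an attached tracer.  The
 * surrounding code and the helper name below are assumptions, not quotes of
 * fs/exec.c or any LSM.
 *
 *      bprm->unsafe = tracehook_unsafe_exec(current);
 *      if (bprm->unsafe & (LSM_UNSAFE_PTRACE | LSM_UNSAFE_PTRACE_CAP))
 *              // e.g. refuse to raise privileges for a traced task
 *              keep_original_creds(bprm);      // hypothetical helper
 */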

/**
 * tracehook_tracer_task - return the task that is tracing the given task
 * @tsk: task to consider
 *
 * Returns NULL if no one is tracing @tsk, or the &struct task_struct
 * pointer to its tracer.
 *
 * Must be called under rcu_read_lock().  The pointer returned might be kept
 * live only by RCU.  During exec, this may be called with task_lock()
 * held on @tsk, still held from when tracehook_unsafe_exec() was called.
 */
static inline struct task_struct *tracehook_tracer_task(struct task_struct *tsk)
{
        if (task_ptrace(tsk) & PT_PTRACED)
                return rcu_dereference(tsk->parent);
        return NULL;
}
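
/*
 * Example (illustrative sketch only): a reader such as a /proc status dumper
 * could fetch the tracer's pid like this.  The function name is hypothetical.
 *
 *      static pid_t tracer_pid_of(struct task_struct *p)
 *      {
 *              struct task_struct *tracer;
 *              pid_t pid = 0;
 *
 *              rcu_read_lock();
 *              tracer = tracehook_tracer_task(p);
 *              if (tracer)
 *                      pid = task_pid_vnr(tracer);
 *              rcu_read_unlock();
 *              return pid;
 *      }
 */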

/**
 * tracehook_report_exec - a successful exec was completed
 * @fmt: &struct linux_binfmt that performed the exec
 * @bprm: &struct linux_binprm containing exec details
 * @regs: user-mode register state
 *
 * An exec just completed, we are shortly going to return to user mode.
 * The freshly initialized register state can be seen and changed in @regs.
 * The name, file and other pointers in @bprm are still on hand to be
 * inspected, but will be freed as soon as this returns.
 *
 * Called with no locks, but with some kernel resources held live
 * and a reference on @fmt->module.
 */
static inline void tracehook_report_exec(struct linux_binfmt *fmt,
                                         struct linux_binprm *bprm,
                                         struct pt_regs *regs)
{
        if (!ptrace_event(PT_TRACE_EXEC, PTRACE_EVENT_EXEC, 0) &&
            unlikely(task_ptrace(current) & PT_PTRACED))
                send_sig(SIGTRAP, current, 0);
}
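
/*
 * Example (illustrative sketch only): the exec core reports the event once
 * the new image is fully set up, shortly before the first return to user
 * mode.  The surrounding context is assumed, not quoted.
 *
 *      // after the chosen binfmt's load_binary() has succeeded:
 *      tracehook_report_exec(fmt, bprm, regs);
 */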

/**
 * tracehook_report_exit - task has begun to exit
 * @exit_code: pointer to value destined for @current->exit_code
 *
 * @exit_code points to the value passed to do_exit(), which tracing
 * might change here.  This is almost the first thing in do_exit(),
 * before freeing any resources or setting the %PF_EXITING flag.
 *
 * Called with no locks held.
 */
static inline void tracehook_report_exit(long *exit_code)
{
        ptrace_event(PT_TRACE_EXIT, PTRACE_EVENT_EXIT, *exit_code);
}

/**
 * tracehook_prepare_clone - prepare for new child to be cloned
 * @clone_flags: %CLONE_* flags from clone/fork/vfork system call
 *
 * This is called before a new user task is to be cloned.
 * Its return value will be passed to tracehook_finish_clone().
 *
 * Called with no locks held.
 */
static inline int tracehook_prepare_clone(unsigned clone_flags)
{
        if (clone_flags & CLONE_UNTRACED)
                return 0;

        if (clone_flags & CLONE_VFORK) {
                if (current->ptrace & PT_TRACE_VFORK)
                        return PTRACE_EVENT_VFORK;
        } else if ((clone_flags & CSIGNAL) != SIGCHLD) {
                if (current->ptrace & PT_TRACE_CLONE)
                        return PTRACE_EVENT_CLONE;
        } else if (current->ptrace & PT_TRACE_FORK)
                return PTRACE_EVENT_FORK;

        return 0;
}
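
/*
 * Example (illustrative sketch only): the fork path decides on the ptrace
 * event up front and threads it through child setup.  The code below is a
 * simplified assumption about the shape of do_fork()/copy_process(), not a
 * quote of kernel/fork.c.
 *
 *      int trace = tracehook_prepare_clone(clone_flags);
 *
 *      p = copy_process(clone_flags, ..., trace);
 *      // inside copy_process(), once the child is on the parent's list:
 *      //      tracehook_finish_clone(p, clone_flags, trace);
 */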

/**
 * tracehook_finish_clone - new child created and being attached
 * @child: new child task
 * @clone_flags: %CLONE_* flags from clone/fork/vfork system call
 * @trace: return value from tracehook_prepare_clone()
 *
 * This is called immediately after adding @child to its parent's children list.
 * The @trace value is that returned by tracehook_prepare_clone().
 *
 * Called with current's siglock and write_lock_irq(&tasklist_lock) held.
 */
static inline void tracehook_finish_clone(struct task_struct *child,
                                          unsigned long clone_flags, int trace)
{
        ptrace_init_task(child, (clone_flags & CLONE_PTRACE) || trace);
}

/**
 * tracehook_report_clone - in parent, new child is about to start running
 * @regs: parent's user register state
 * @clone_flags: flags from parent's system call
 * @pid: new child's PID in the parent's namespace
 * @child: new child task
 *
 * Called after a child is set up, but before it has been started running.
 * This is not a good place to block, because the child has not started
 * yet.  Suspend the child here if desired, and then block in
 * tracehook_report_clone_complete().  This must prevent the child from
 * self-reaping if tracehook_report_clone_complete() uses the @child
 * pointer; otherwise it might have died and been released by the time
 * tracehook_report_clone_complete() is called.
 *
 * Called with no locks held, but the child cannot run until this returns.
 */
static inline void tracehook_report_clone(struct pt_regs *regs,
                                          unsigned long clone_flags,
                                          pid_t pid, struct task_struct *child)
{
        if (unlikely(task_ptrace(child))) {
                /*
                 * It doesn't matter who attached or is attaching to this
                 * task; the pending SIGSTOP is right in any case.
                 */
                sigaddset(&child->pending.signal, SIGSTOP);
                set_tsk_thread_flag(child, TIF_SIGPENDING);
        }
}

/**
 * tracehook_report_clone_complete - new child is running
 * @trace: return value from tracehook_prepare_clone()
 * @regs: parent's user register state
 * @clone_flags: flags from parent's system call
 * @pid: new child's PID in the parent's namespace
 * @child: child task, already running
 *
 * This is called just after the child has started running.  This is
 * just before the clone/fork syscall returns, or blocks for vfork
 * child completion if @clone_flags has the %CLONE_VFORK bit set.
 * The @child pointer may be invalid if a self-reaping child died and
 * tracehook_report_clone() took no action to prevent it from self-reaping.
 *
 * Called with no locks held.
 */
static inline void tracehook_report_clone_complete(int trace,
                                                   struct pt_regs *regs,
                                                   unsigned long clone_flags,
                                                   pid_t pid,
                                                   struct task_struct *child)
{
        if (unlikely(trace))
                ptrace_event(0, trace, pid);
}

/**
 * tracehook_report_vfork_done - vfork parent's child has exited or exec'd
 * @child: child task, already running
 * @pid: new child's PID in the parent's namespace
 *
 * Called after a %CLONE_VFORK parent has waited for the child to complete.
 * The clone/vfork system call will return immediately after this.
 * The @child pointer may be invalid if a self-reaping child died and
 * tracehook_report_clone() took no action to prevent it from self-reaping.
 *
 * Called with no locks held.
 */
static inline void tracehook_report_vfork_done(struct task_struct *child,
                                               pid_t pid)
{
        ptrace_event(PT_TRACE_VFORK_DONE, PTRACE_EVENT_VFORK_DONE, pid);
}
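
/*
 * Example (illustrative sketch only): in the parent, the clone reporting
 * hooks above fire in a fixed order around waking the child.  This is a
 * simplified assumption about the tail of do_fork(), not a quote of it.
 *
 *      tracehook_report_clone(regs, clone_flags, nr, p);
 *      wake_up_new_task(p, clone_flags);
 *      tracehook_report_clone_complete(trace, regs, clone_flags, nr, p);
 *
 *      if (clone_flags & CLONE_VFORK) {
 *              wait_for_completion(&vfork);    // until child exits or execs
 *              tracehook_report_vfork_done(p, nr);
 *      }
 */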

/**
 * tracehook_prepare_release_task - task is being reaped, clean up tracing
 * @task: task in %EXIT_DEAD state
 *
 * This is called in release_task() just before @task gets finally reaped
 * and freed.  This would be the ideal place to remove and clean up any
 * tracing-related state for @task.
 *
 * Called with no locks held.
 */
static inline void tracehook_prepare_release_task(struct task_struct *task)
{
}

/**
 * tracehook_finish_release_task - final tracing clean-up
 * @task: task in %EXIT_DEAD state
 *
 * This is called in release_task() while @task is in the middle of
 * being reaped.  After this, there must be no tracing entanglements.
 *
 * Called with write_lock_irq(&tasklist_lock) held.
 */
static inline void tracehook_finish_release_task(struct task_struct *task)
{
        ptrace_release_task(task);
}
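
/*
 * Example (illustrative sketch only): the two release hooks bracket the
 * tasklist-locked portion of release_task().  The shape below is an
 * assumption for illustration, not a quote of kernel/exit.c.
 *
 *      tracehook_prepare_release_task(p);      // no locks held
 *      write_lock_irq(&tasklist_lock);
 *      tracehook_finish_release_task(p);       // detach all tracing state
 *      ...
 *      write_unlock_irq(&tasklist_lock);
 */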

/**
 * tracehook_signal_handler - signal handler setup is complete
 * @sig: number of signal being delivered
 * @info: siginfo_t of signal being delivered
 * @ka: sigaction setting that chose the handler
 * @regs: user register state
 * @stepping: nonzero if debugger single-step or block-step in use
 *
 * Called by the arch code after a signal handler has been set up.
 * Register and stack state reflects the user handler about to run.
 * Signal mask changes have already been made.
 *
 * Called without locks, shortly before returning to user mode
 * (or handling more signals).
 */
static inline void tracehook_signal_handler(int sig, siginfo_t *info,
                                            const struct k_sigaction *ka,
                                            struct pt_regs *regs, int stepping)
{
        if (stepping)
                ptrace_notify(SIGTRAP);
}
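
/*
 * Example (illustrative sketch only): arch signal-delivery code calls this
 * right after building the user-mode signal frame.  handle_signal() and
 * setup_signal_frame() are hypothetical, simplified names for arch routines.
 *
 *      static void handle_signal(int sig, siginfo_t *info,
 *                                struct k_sigaction *ka, struct pt_regs *regs)
 *      {
 *              if (setup_signal_frame(sig, ka, info, regs) == 0)
 *                      tracehook_signal_handler(sig, info, ka, regs,
 *                                               test_thread_flag(TIF_SINGLESTEP));
 *      }
 */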

/**
 * tracehook_consider_ignored_signal - suppress short-circuit of ignored signal
 * @task: task receiving the signal
 * @sig: signal number being sent
 *
 * Return zero iff tracing doesn't care to examine this ignored signal,
 * so it can short-circuit normal delivery and never even get queued.
 *
 * Called with @task->sighand->siglock held.
 */
static inline int tracehook_consider_ignored_signal(struct task_struct *task,
                                                    int sig)
{
        return (task_ptrace(task) & PT_PTRACED) != 0;
}

/**
 * tracehook_consider_fatal_signal - suppress special handling of fatal signal
 * @task: task receiving the signal
 * @sig: signal number being sent
 *
 * Return nonzero to prevent special handling of this termination signal.
 * Normally the handler for the signal is %SIG_DFL.  It can be %SIG_IGN if
 * @sig is ignored, in which case force_sig() is about to reset it to %SIG_DFL.
 * When this returns zero, this signal might cause a quick termination
 * that does not give the debugger a chance to intercept the signal.
 *
 * Called with or without @task->sighand->siglock held.
 */
static inline int tracehook_consider_fatal_signal(struct task_struct *task,
                                                  int sig)
{
        return (task_ptrace(task) & PT_PTRACED) != 0;
}

/**
 * tracehook_force_sigpending - let tracing force signal_pending(current) on
 *
 * Called when recomputing our signal_pending() flag.  Return nonzero
 * to force the signal_pending() flag on, so that tracehook_get_signal()
 * will be called before the next return to user mode.
 *
 * Called with @current->sighand->siglock held.
 */
static inline int tracehook_force_sigpending(void)
{
        return 0;
}

/**
 * tracehook_get_signal - deliver synthetic signal to traced task
 * @task: @current
 * @regs: task_pt_regs(@current)
 * @info: details of synthetic signal
 * @return_ka: sigaction for synthetic signal
 *
 * Return zero to check for a real pending signal normally.
 * Return -1 after releasing the siglock to repeat the check.
 * Return a signal number to induce an artificial signal delivery,
 * setting *@info and *@return_ka to specify its details and behavior.
 *
 * The @return_ka->sa_handler value controls the disposition of the
 * signal, no matter the signal number.  For %SIG_DFL, the return value
 * is a representative signal to indicate the behavior (e.g. %SIGTERM
 * for death, %SIGQUIT for core dump, %SIGSTOP for job control stop,
 * %SIGTSTP for stop unless in an orphaned pgrp), but the signal number
 * reported will be @info->si_signo instead.
 *
 * Called with @task->sighand->siglock held, before dequeuing pending signals.
 */
static inline int tracehook_get_signal(struct task_struct *task,
                                       struct pt_regs *regs,
                                       siginfo_t *info,
                                       struct k_sigaction *return_ka)
{
        return 0;
}
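
/*
 * Example (illustrative sketch only): the signal-dequeue loop consults this
 * hook before looking at the real pending queues.  This is a simplified
 * assumption about get_signal_to_deliver(), not a quote of kernel/signal.c.
 *
 *      signr = tracehook_get_signal(current, regs, info, return_ka);
 *      if (unlikely(signr < 0))
 *              continue;               // siglock was dropped; recheck pending
 *      if (!signr)
 *              signr = dequeue_signal(current, &current->blocked, info);
 */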

/**
 * tracehook_notify_jctl - report about job control stop/continue
 * @notify: zero, %CLD_STOPPED or %CLD_CONTINUED
 * @why: %CLD_STOPPED or %CLD_CONTINUED
 *
 * This is called when we might call do_notify_parent_cldstop().
 *
 * @notify is zero if we would not ordinarily send a %SIGCHLD,
 * or is the %CLD_STOPPED or %CLD_CONTINUED .si_code for %SIGCHLD.
 *
 * @why is %CLD_STOPPED when about to stop for job control;
 * we are already in %TASK_STOPPED state, about to call schedule().
 * It might also be that we have just exited (check %PF_EXITING),
 * but need to report that a group-wide stop is complete.
 *
 * @why is %CLD_CONTINUED when waking up after job control stop and
 * ready to make a delayed @notify report.
 *
 * Return the %CLD_* value for %SIGCHLD, or zero to generate no signal.
 *
 * Called with the siglock held.
 */
static inline int tracehook_notify_jctl(int notify, int why)
{
        return notify ?: (current->ptrace & PT_PTRACED) ? why : 0;
}

/**
 * tracehook_finish_jctl - report about return from job control stop
 *
 * This is called by do_signal_stop() after wakeup.
 */
static inline void tracehook_finish_jctl(void)
{
}

#define DEATH_REAP			-1
#define DEATH_DELAYED_GROUP_LEADER	-2

/**
 * tracehook_notify_death - task is dead, ready to notify parent
 * @task: @current task now exiting
 * @death_cookie: value to pass to tracehook_report_death()
 * @group_dead: nonzero if this was the last thread in the group to die
 *
 * A return value >= 0 means call do_notify_parent() with that signal
 * number.  A negative return value can be %DEATH_REAP to self-reap right
 * now, or %DEATH_DELAYED_GROUP_LEADER to be a zombie without notifying our
 * parent.  Note that a return value of 0 means a do_notify_parent() call
 * that sends no signal, but still wakes up a parent blocked in wait*().
 *
 * Called with write_lock_irq(&tasklist_lock) held.
 */
static inline int tracehook_notify_death(struct task_struct *task,
                                         void **death_cookie, int group_dead)
{
        if (task_detached(task))
                return task->ptrace ? SIGCHLD : DEATH_REAP;

        /*
         * If something other than our normal parent is ptracing us, then
         * send it a SIGCHLD instead of honoring exit_signal.  exit_signal
         * only has special meaning to our real parent.
         */
        if (thread_group_empty(task) && !ptrace_reparented(task))
                return task->exit_signal;

        return task->ptrace ? SIGCHLD : DEATH_DELAYED_GROUP_LEADER;
}
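
/*
 * Example (illustrative sketch only): exit_notify() turns the return value
 * into a notification and an exit_state.  This is a simplified assumption
 * about that caller, not a quote of kernel/exit.c.
 *
 *      signal = tracehook_notify_death(tsk, &cookie, group_dead);
 *      if (signal >= 0)
 *              signal = do_notify_parent(tsk, signal);
 *
 *      tsk->exit_state = (signal == DEATH_REAP) ? EXIT_DEAD : EXIT_ZOMBIE;
 *      ...
 *      tracehook_report_death(tsk, signal, cookie, group_dead);
 *      if (signal == DEATH_REAP)
 *              release_task(tsk);      // self-reap: nobody will wait for us
 */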

/**
 * tracehook_report_death - task is dead and ready to be reaped
 * @task: @current task now exiting
 * @signal: return value from tracehook_notify_death()
 * @death_cookie: value passed back from tracehook_notify_death()
 * @group_dead: nonzero if this was the last thread in the group to die
 *
 * Thread has just become a zombie or is about to self-reap.  If positive,
 * @signal is the signal number just sent to the parent (usually %SIGCHLD).
 * If @signal is %DEATH_REAP, this thread will self-reap.  If @signal is
 * %DEATH_DELAYED_GROUP_LEADER, this is a delayed_group_leader() zombie.
 * The @death_cookie was passed back by tracehook_notify_death().
 *
 * If normal reaping is not inhibited, @task->exit_state might be changing
 * in parallel.
 *
 * Called without locks.
 */
static inline void tracehook_report_death(struct task_struct *task,
                                          int signal, void *death_cookie,
                                          int group_dead)
{
}

#ifdef TIF_NOTIFY_RESUME
/**
 * set_notify_resume - cause tracehook_notify_resume() to be called
 * @task: task that will call tracehook_notify_resume()
 *
 * Calling this arranges that @task will call tracehook_notify_resume()
 * before returning to user mode.  If it's already running in user mode,
 * it will enter the kernel and call tracehook_notify_resume() soon.
 * If it's blocked, it will not be woken.
 */
static inline void set_notify_resume(struct task_struct *task)
{
        if (!test_and_set_tsk_thread_flag(task, TIF_NOTIFY_RESUME))
                kick_process(task);
}
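
/*
 * Example (illustrative sketch only): a tracing facility that wants to run
 * work in @task's own context before it returns to user mode can queue that
 * work somewhere the resume hook will find it (queueing not shown) and then
 * poke the task:
 *
 *      set_notify_resume(task);        // task will hit tracehook_notify_resume()
 */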

/**
 * tracehook_notify_resume - report when about to return to user mode
 * @regs: user-mode registers of @current task
 *
 * This is called when %TIF_NOTIFY_RESUME has been set.  Now we are
 * about to return to user mode, and the user state in @regs can be
 * inspected or adjusted.  The caller in arch code has cleared
 * %TIF_NOTIFY_RESUME before the call.  If the flag gets set again
 * asynchronously, this will be called again before we return to
 * user mode.
 *
 * Called without locks.
 */
static inline void tracehook_notify_resume(struct pt_regs *regs)
{
}
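
/*
 * Example (illustrative sketch only): arch return-to-user code handles the
 * flag in its "work pending" path.  do_notify_resume() is the conventional
 * arch entry point; the exact shape here is an assumption.
 *
 *      void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags)
 *      {
 *              if (thread_flags & _TIF_SIGPENDING)
 *                      do_signal(regs);                // arch-specific
 *
 *              if (thread_flags & _TIF_NOTIFY_RESUME) {
 *                      clear_thread_flag(TIF_NOTIFY_RESUME);
 *                      tracehook_notify_resume(regs);
 *              }
 *      }
 */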
#endif	/* TIF_NOTIFY_RESUME */

#endif	/* <linux/tracehook.h> */