/*
 * Tracing hooks
 *
 * Copyright (C) 2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 *
 * This file defines hook entry points called by core code where
 * user tracing/debugging support might need to do something.  These
 * entry points are called tracehook_*().  Each hook declared below
 * has a detailed kerneldoc comment giving the context (locking et
 * al) from which it is called, and the meaning of its return value.
 *
 * Each function here typically has only one call site, so it is ok
 * to have some nontrivial tracehook_*() inlines.  In all cases, the
 * fast path when no tracing is enabled should be very short.
 *
 * The purpose of this file and the tracehook_* layer is to consolidate
 * the interface that the kernel core and arch code uses to enable any
 * user debugging or tracing facility (such as ptrace).  The interfaces
 * here are carefully documented so that maintainers of core and arch
 * code do not need to think about the implementation details of the
 * tracing facilities.  Likewise, maintainers of the tracing code do not
 * need to understand all the calling core or arch code in detail, just
 * the documented circumstances of each call, such as locking conditions.
 *
 * If the calling core code changes so that locking is different, then
 * it is ok to change the interface documented here.  The maintainer of
 * the core code making the change should notify the maintainers of the
 * tracing code so they can work out the change together.
 *
 * Some tracehook_*() inlines take arguments that the current tracing
 * implementations might not necessarily use.  These function signatures
 * are chosen to pass in all the information that is on hand in the
 * caller and might conceivably be relevant to a tracer, so that the
 * core code won't have to be updated when tracing adds more features.
 * If a call site changes so that some of those parameters are no longer
 * already on hand without extra work, then the tracehook_* interface
 * can change so there is no make-work burden on the core code.  Again,
 * the maintainer of the core code making the change should notify the
 * maintainers of the tracing code so they can work out the change
 * together.
 */
#ifndef _LINUX_TRACEHOOK_H
#define _LINUX_TRACEHOOK_H	1

#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/security.h>

struct linux_binprm;
struct linux_binfmt;	/* used by tracehook_report_exec() below */

/**
 * tracehook_unsafe_exec - check for exec declared unsafe due to tracing
 * @task: current task doing exec
 *
 * Return %LSM_UNSAFE_* bits applied to an exec because of tracing.
 *
 * Called with task_lock() held on @task.
 */
static inline int tracehook_unsafe_exec(struct task_struct *task)
{
	int unsafe = 0;
	int ptrace = task_ptrace(task);
	if (ptrace & PT_PTRACED) {
		if (ptrace & PT_PTRACE_CAP)
			unsafe |= LSM_UNSAFE_PTRACE_CAP;
		else
			unsafe |= LSM_UNSAFE_PTRACE;
	}
	return unsafe;
}
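
/*
 * Editor's illustration (not part of the original header): the exec
 * path is expected to consume this result under task_lock(), roughly
 * as sketched below.  The surrounding code and the bprm->unsafe field
 * shown here are assumptions for illustration only.
 *
 *	task_lock(current);
 *	bprm->unsafe |= tracehook_unsafe_exec(current);
 *	task_unlock(current);
 */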

/**
 * tracehook_tracer_task - return the task that is tracing the given task
 * @tsk: task to consider
 *
 * Returns NULL if no one is tracing @tsk, or the &struct task_struct
 * pointer to its tracer.
 *
 * Must be called under rcu_read_lock().  The pointer returned might be
 * kept live only by RCU.  During exec, this may be called with task_lock()
 * held on @tsk, still held from when tracehook_unsafe_exec() was called.
 */
static inline struct task_struct *tracehook_tracer_task(struct task_struct *tsk)
{
	if (task_ptrace(tsk) & PT_PTRACED)
		return rcu_dereference(tsk->parent);
	return NULL;
}
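
/*
 * Editor's illustration: reporting a tracer PID (e.g. for a /proc
 * status file) could look roughly like the sketch below; the local
 * variables and the task_pid_nr_ns() call are assumptions for
 * illustration only.
 *
 *	pid_t tpid = 0;
 *	struct task_struct *tracer;
 *
 *	rcu_read_lock();
 *	tracer = tracehook_tracer_task(task);
 *	if (tracer)
 *		tpid = task_pid_nr_ns(tracer, ns);
 *	rcu_read_unlock();
 */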

/**
 * tracehook_report_exec - a successful exec was completed
 * @fmt: &struct linux_binfmt that performed the exec
 * @bprm: &struct linux_binprm containing exec details
 * @regs: user-mode register state
 *
 * An exec just completed, and we are shortly going to return to user mode.
 * The freshly initialized register state can be seen and changed in @regs.
 * The name, file and other pointers in @bprm are still on hand to be
 * inspected, but will be freed as soon as this returns.
 *
 * Called with no locks, but with some kernel resources held live
 * and a reference on @fmt->module.
 */
static inline void tracehook_report_exec(struct linux_binfmt *fmt,
					 struct linux_binprm *bprm,
					 struct pt_regs *regs)
{
	if (!ptrace_event(PT_TRACE_EXEC, PTRACE_EVENT_EXEC, 0) &&
	    unlikely(task_ptrace(current) & PT_PTRACED))
		send_sig(SIGTRAP, current, 0);
}
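
/*
 * Editor's illustration: the binfmt loading path is expected to invoke
 * this hook right after a successful load, roughly as sketched below.
 * The caller structure is an assumption for illustration only.
 *
 *	retval = fmt->load_binary(bprm, regs);
 *	if (retval >= 0) {
 *		tracehook_report_exec(fmt, bprm, regs);
 *		...
 *	}
 */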

/**
 * tracehook_report_exit - task has begun to exit
 * @exit_code: pointer to value destined for @current->exit_code
 *
 * @exit_code points to the value passed to do_exit(), which tracing
 * might change here.  This is almost the first thing in do_exit(),
 * before freeing any resources or setting the %PF_EXITING flag.
 *
 * Called with no locks held.
 */
static inline void tracehook_report_exit(long *exit_code)
{
	ptrace_event(PT_TRACE_EXIT, PTRACE_EVENT_EXIT, *exit_code);
}
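
/*
 * Editor's illustration: do_exit() is expected to call this near its
 * top, before any teardown, so a tracer can observe or rewrite the exit
 * code in place.  The sketch below is an assumption for illustration
 * only.
 *
 *	void do_exit(long code)
 *	{
 *		...
 *		tracehook_report_exit(&code);
 *		...
 *	}
 */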

/**
 * tracehook_prepare_clone - prepare for new child to be cloned
 * @clone_flags: %CLONE_* flags from clone/fork/vfork system call
 *
 * This is called before a new user task is to be cloned.
 * Its return value will be passed to tracehook_finish_clone().
 *
 * Called with no locks held.
 */
static inline int tracehook_prepare_clone(unsigned clone_flags)
{
	if (clone_flags & CLONE_UNTRACED)
		return 0;

	if (clone_flags & CLONE_VFORK) {
		if (current->ptrace & PT_TRACE_VFORK)
			return PTRACE_EVENT_VFORK;
	} else if ((clone_flags & CSIGNAL) != SIGCHLD) {
		if (current->ptrace & PT_TRACE_CLONE)
			return PTRACE_EVENT_CLONE;
	} else if (current->ptrace & PT_TRACE_FORK)
		return PTRACE_EVENT_FORK;

	return 0;
}
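
/*
 * Editor's illustration: the fork path is expected to fetch the event
 * code before creating the child and to thread it through child setup,
 * roughly as below.  copy_process() and the user_mode() check are
 * assumptions about the caller, for illustration only.
 *
 *	int trace = 0;
 *
 *	if (likely(user_mode(regs)))
 *		trace = tracehook_prepare_clone(clone_flags);
 *
 *	p = copy_process(clone_flags, ..., trace);
 */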

/**
 * tracehook_finish_clone - new child created and being attached
 * @child: new child task
 * @clone_flags: %CLONE_* flags from clone/fork/vfork system call
 * @trace: return value from tracehook_prepare_clone()
 *
 * This is called immediately after adding @child to its parent's
 * children list.  The @trace value is that returned by
 * tracehook_prepare_clone().
 *
 * Called with current's siglock and write_lock_irq(&tasklist_lock) held.
 */
static inline void tracehook_finish_clone(struct task_struct *child,
					  unsigned long clone_flags, int trace)
{
	ptrace_init_task(child, (clone_flags & CLONE_PTRACE) || trace);
}
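
/*
 * Editor's illustration: inside child setup, with current's siglock and
 * write_lock_irq(&tasklist_lock) already held as documented above, the
 * call is simply the line below; the names p and trace are assumptions
 * for illustration only.
 *
 *	tracehook_finish_clone(p, clone_flags, trace);
 */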

/**
 * tracehook_report_clone - in parent, new child is about to start running
 * @trace: return value from tracehook_prepare_clone()
 * @regs: parent's user register state
 * @clone_flags: flags from parent's system call
 * @pid: new child's PID in the parent's namespace
 * @child: new child task
 *
 * Called after a child is set up, but before it has been started running.
 * The @trace value is that returned by tracehook_prepare_clone().
 * This is not a good place to block, because the child has not started yet.
 * Suspend the child here if desired, and block in
 * tracehook_report_clone_complete().  This must prevent the child from
 * self-reaping if tracehook_report_clone_complete() uses the @child
 * pointer; otherwise it might have died and been released by the time
 * tracehook_report_clone_complete() is called.
 *
 * Called with no locks held, but the child cannot run until this returns.
 */
static inline void tracehook_report_clone(int trace, struct pt_regs *regs,
					  unsigned long clone_flags,
					  pid_t pid, struct task_struct *child)
{
	if (unlikely(trace)) {
		/*
		 * The child starts up with an immediate SIGSTOP.
		 */
		sigaddset(&child->pending.signal, SIGSTOP);
		set_tsk_thread_flag(child, TIF_SIGPENDING);
	}
}

/**
 * tracehook_report_clone_complete - new child is running
 * @trace: return value from tracehook_prepare_clone()
 * @regs: parent's user register state
 * @clone_flags: flags from parent's system call
 * @pid: new child's PID in the parent's namespace
 * @child: child task, already running
 *
 * This is called just after the child has started running.  This is
 * just before the clone/fork syscall returns, or blocks for vfork
 * child completion if @clone_flags has the %CLONE_VFORK bit set.
 * The @child pointer may be invalid if a self-reaping child died and
 * tracehook_report_clone() took no action to prevent it from self-reaping.
 *
 * Called with no locks held.
 */
static inline void tracehook_report_clone_complete(int trace,
						   struct pt_regs *regs,
						   unsigned long clone_flags,
						   pid_t pid,
						   struct task_struct *child)
{
	if (unlikely(trace))
		ptrace_event(0, trace, pid);
}

/**
 * tracehook_report_vfork_done - vfork parent's child has exited or exec'd
 * @child: child task, already running
 * @pid: new child's PID in the parent's namespace
 *
 * Called after a %CLONE_VFORK parent has waited for the child to complete.
 * The clone/vfork system call will return immediately after this.
 * The @child pointer may be invalid if a self-reaping child died and
 * tracehook_report_clone() took no action to prevent it from self-reaping.
 *
 * Called with no locks held.
 */
static inline void tracehook_report_vfork_done(struct task_struct *child,
					       pid_t pid)
{
	ptrace_event(PT_TRACE_VFORK_DONE, PTRACE_EVENT_VFORK_DONE, pid);
}
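
/*
 * Editor's illustration: the tail of the fork path is expected to
 * string the three reporting hooks above together roughly as below.
 * wake_up_new_task(), the vfork completion, and the variable names are
 * assumptions about the caller, for illustration only.
 *
 *	tracehook_report_clone(trace, regs, clone_flags, nr, p);
 *
 *	wake_up_new_task(p, clone_flags);
 *
 *	tracehook_report_clone_complete(trace, regs, clone_flags, nr, p);
 *
 *	if (clone_flags & CLONE_VFORK) {
 *		wait_for_completion(&vfork);
 *		tracehook_report_vfork_done(p, nr);
 *	}
 */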

/**
 * tracehook_prepare_release_task - task is being reaped, clean up tracing
 * @task: task in %EXIT_DEAD state
 *
 * This is called in release_task() just before @task gets finally reaped
 * and freed.  This would be the ideal place to remove and clean up any
 * tracing-related state for @task.
 *
 * Called with no locks held.
 */
static inline void tracehook_prepare_release_task(struct task_struct *task)
{
}

/**
 * tracehook_finish_release_task - task is being reaped, clean up tracing
 * @task: task in %EXIT_DEAD state
 *
 * This is called in release_task() while @task is in the middle of
 * being reaped.  After this, there must be no tracing entanglements.
 *
 * Called with write_lock_irq(&tasklist_lock) held.
 */
static inline void tracehook_finish_release_task(struct task_struct *task)
{
	ptrace_release_task(task);
}
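
/*
 * Editor's illustration: release_task() is expected to bracket the
 * final reap with the two hooks above, roughly as below.  The exact
 * placement relative to the rest of release_task() is an assumption
 * for illustration only.
 *
 *	tracehook_prepare_release_task(p);
 *	...
 *	write_lock_irq(&tasklist_lock);
 *	tracehook_finish_release_task(p);
 *	...
 *	write_unlock_irq(&tasklist_lock);
 */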

#endif	/* <linux/tracehook.h> */