cn_proc.c

/*
 * cn_proc.c - process events connector
 *
 * Copyright (C) Matt Helsley, IBM Corp. 2005
 * Based on cn_fork.c by Guillaume Thouvenin <guillaume.thouvenin@bull.net>
 * Original copyright notice follows:
 * Copyright (C) 2005 BULL SA.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/init.h>
#include <linux/connector.h>
#include <linux/gfp.h>
#include <linux/ptrace.h>
#include <linux/atomic.h>

#include <asm/unaligned.h>

#include <linux/cn_proc.h>

#define CN_PROC_MSG_SIZE (sizeof(struct cn_msg) + sizeof(struct proc_event))

static atomic_t proc_event_num_listeners = ATOMIC_INIT(0);
static struct cb_id cn_proc_event_id = { CN_IDX_PROC, CN_VAL_PROC };

/* proc_event_counts is used as the sequence number of the netlink message */
static DEFINE_PER_CPU(__u32, proc_event_counts) = { 0 };

static inline void get_seq(__u32 *ts, int *cpu)
{
        preempt_disable();
        *ts = __this_cpu_inc_return(proc_event_counts) - 1;
        *cpu = smp_processor_id();
        preempt_enable();
}
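
/*
 * Each emitted event carries the per-CPU sequence number above in msg->seq
 * and the originating CPU in ev->cpu.  A listener that tracks (cpu, seq)
 * pairs can therefore spot a gap in a CPU's sequence and infer that events
 * were dropped (for example when cn_netlink_send() fails); this is an
 * observation about the scheme above, not an extra guarantee made here.
 */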

void proc_fork_connector(struct task_struct *task)
{
        struct cn_msg *msg;
        struct proc_event *ev;
        __u8 buffer[CN_PROC_MSG_SIZE];
        struct timespec ts;
        struct task_struct *parent;

        if (atomic_read(&proc_event_num_listeners) < 1)
                return;

        msg = (struct cn_msg *)buffer;
        ev = (struct proc_event *)msg->data;
        get_seq(&msg->seq, &ev->cpu);
        ktime_get_ts(&ts); /* get high res monotonic timestamp */
        put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
        ev->what = PROC_EVENT_FORK;
        rcu_read_lock();
        parent = rcu_dereference(task->real_parent);
        ev->event_data.fork.parent_pid = parent->pid;
        ev->event_data.fork.parent_tgid = parent->tgid;
        rcu_read_unlock();
        ev->event_data.fork.child_pid = task->pid;
        ev->event_data.fork.child_tgid = task->tgid;
        memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
        /* If cn_netlink_send() failed, the data is not sent */
        cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}

void proc_exec_connector(struct task_struct *task)
{
        struct cn_msg *msg;
        struct proc_event *ev;
        struct timespec ts;
        __u8 buffer[CN_PROC_MSG_SIZE];

        if (atomic_read(&proc_event_num_listeners) < 1)
                return;

        msg = (struct cn_msg *)buffer;
        ev = (struct proc_event *)msg->data;
        get_seq(&msg->seq, &ev->cpu);
        ktime_get_ts(&ts); /* get high res monotonic timestamp */
        put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
        ev->what = PROC_EVENT_EXEC;
        ev->event_data.exec.process_pid = task->pid;
        ev->event_data.exec.process_tgid = task->tgid;
        memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
        cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}

void proc_id_connector(struct task_struct *task, int which_id)
{
        struct cn_msg *msg;
        struct proc_event *ev;
        __u8 buffer[CN_PROC_MSG_SIZE];
        struct timespec ts;
        const struct cred *cred;

        if (atomic_read(&proc_event_num_listeners) < 1)
                return;

        msg = (struct cn_msg *)buffer;
        ev = (struct proc_event *)msg->data;
        ev->what = which_id;
        ev->event_data.id.process_pid = task->pid;
        ev->event_data.id.process_tgid = task->tgid;
        rcu_read_lock();
        cred = __task_cred(task);
        if (which_id == PROC_EVENT_UID) {
                ev->event_data.id.r.ruid = cred->uid;
                ev->event_data.id.e.euid = cred->euid;
        } else if (which_id == PROC_EVENT_GID) {
                ev->event_data.id.r.rgid = cred->gid;
                ev->event_data.id.e.egid = cred->egid;
        } else {
                rcu_read_unlock();
                return;
        }
        rcu_read_unlock();
        get_seq(&msg->seq, &ev->cpu);
        ktime_get_ts(&ts); /* get high res monotonic timestamp */
        put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
        memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
        cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}

void proc_sid_connector(struct task_struct *task)
{
        struct cn_msg *msg;
        struct proc_event *ev;
        struct timespec ts;
        __u8 buffer[CN_PROC_MSG_SIZE];

        if (atomic_read(&proc_event_num_listeners) < 1)
                return;

        msg = (struct cn_msg *)buffer;
        ev = (struct proc_event *)msg->data;
        get_seq(&msg->seq, &ev->cpu);
        ktime_get_ts(&ts); /* get high res monotonic timestamp */
        put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
        ev->what = PROC_EVENT_SID;
        ev->event_data.sid.process_pid = task->pid;
        ev->event_data.sid.process_tgid = task->tgid;
        memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
        cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}

void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
{
        struct cn_msg *msg;
        struct proc_event *ev;
        struct timespec ts;
        __u8 buffer[CN_PROC_MSG_SIZE];

        if (atomic_read(&proc_event_num_listeners) < 1)
                return;

        msg = (struct cn_msg *)buffer;
        ev = (struct proc_event *)msg->data;
        get_seq(&msg->seq, &ev->cpu);
        ktime_get_ts(&ts); /* get high res monotonic timestamp */
        put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
        ev->what = PROC_EVENT_PTRACE;
        ev->event_data.ptrace.process_pid = task->pid;
        ev->event_data.ptrace.process_tgid = task->tgid;
        if (ptrace_id == PTRACE_ATTACH) {
                ev->event_data.ptrace.tracer_pid = current->pid;
                ev->event_data.ptrace.tracer_tgid = current->tgid;
        } else if (ptrace_id == PTRACE_DETACH) {
                ev->event_data.ptrace.tracer_pid = 0;
                ev->event_data.ptrace.tracer_tgid = 0;
        } else
                return;

        memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
        cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}

void proc_comm_connector(struct task_struct *task)
{
        struct cn_msg *msg;
        struct proc_event *ev;
        struct timespec ts;
        __u8 buffer[CN_PROC_MSG_SIZE];

        if (atomic_read(&proc_event_num_listeners) < 1)
                return;

        msg = (struct cn_msg *)buffer;
        ev = (struct proc_event *)msg->data;
        get_seq(&msg->seq, &ev->cpu);
        ktime_get_ts(&ts); /* get high res monotonic timestamp */
        put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
        ev->what = PROC_EVENT_COMM;
        ev->event_data.comm.process_pid = task->pid;
        ev->event_data.comm.process_tgid = task->tgid;
        get_task_comm(ev->event_data.comm.comm, task);
        memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
        cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}

void proc_exit_connector(struct task_struct *task)
{
        struct cn_msg *msg;
        struct proc_event *ev;
        __u8 buffer[CN_PROC_MSG_SIZE];
        struct timespec ts;

        if (atomic_read(&proc_event_num_listeners) < 1)
                return;

        msg = (struct cn_msg *)buffer;
        ev = (struct proc_event *)msg->data;
        get_seq(&msg->seq, &ev->cpu);
        ktime_get_ts(&ts); /* get high res monotonic timestamp */
        put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
        ev->what = PROC_EVENT_EXIT;
        ev->event_data.exit.process_pid = task->pid;
        ev->event_data.exit.process_tgid = task->tgid;
        ev->event_data.exit.exit_code = task->exit_code;
        ev->event_data.exit.exit_signal = task->exit_signal;
        memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
        cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}

/*
 * Send an acknowledgement message to userspace
 *
 * Use 0 for success, EFOO otherwise.
 * Note: this is the negative of conventional kernel error
 * values because it's not being returned via syscall return
 * mechanisms.
 */
static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
{
        struct cn_msg *msg;
        struct proc_event *ev;
        __u8 buffer[CN_PROC_MSG_SIZE];
        struct timespec ts;

        if (atomic_read(&proc_event_num_listeners) < 1)
                return;

        msg = (struct cn_msg *)buffer;
        ev = (struct proc_event *)msg->data;
        msg->seq = rcvd_seq;
        ktime_get_ts(&ts); /* get high res monotonic timestamp */
        put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
        ev->cpu = -1;
        ev->what = PROC_EVENT_NONE;
        ev->event_data.ack.err = err;
        memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
        msg->ack = rcvd_ack + 1;
        msg->len = sizeof(*ev);
        cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
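
/*
 * A listener can match this reply to its request: msg->seq echoes the
 * received sequence number and msg->ack is the received ack value plus one,
 * while event_data.ack.err carries 0 on success or a positive errno value
 * (EINVAL for an unrecognised op, see cn_proc_mcast_ctl() below).
 */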

/**
 * cn_proc_mcast_ctl
 * @msg: message sent from userspace via the connector
 * @nsp: netlink parameters of the socket that sent the message
 */
static void cn_proc_mcast_ctl(struct cn_msg *msg,
                              struct netlink_skb_parms *nsp)
{
        enum proc_cn_mcast_op *mc_op = NULL;
        int err = 0;

        if (msg->len != sizeof(*mc_op))
                return;

        mc_op = (enum proc_cn_mcast_op *)msg->data;
        switch (*mc_op) {
        case PROC_CN_MCAST_LISTEN:
                atomic_inc(&proc_event_num_listeners);
                break;
        case PROC_CN_MCAST_IGNORE:
                atomic_dec(&proc_event_num_listeners);
                break;
        default:
                err = EINVAL;
                break;
        }
        cn_proc_ack(err, msg->seq, msg->ack);
}
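
/*
 * Rough userspace sketch of how a listener might subscribe.  This is
 * illustrative only and not part of this driver: error handling is omitted
 * and the buffer layout (nlmsghdr followed by cn_msg and the op) is simply
 * the usual NETLINK_CONNECTOR packing assumed here.  It needs roughly
 * <sys/socket.h>, <unistd.h>, <string.h>, <linux/netlink.h>,
 * <linux/connector.h> and <linux/cn_proc.h>.
 *
 *	int sock = socket(PF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR);
 *	struct sockaddr_nl sa = {
 *		.nl_family = AF_NETLINK,
 *		.nl_groups = CN_IDX_PROC,
 *		.nl_pid    = getpid(),
 *	};
 *	bind(sock, (struct sockaddr *)&sa, sizeof(sa));
 *
 *	char buf[NLMSG_SPACE(sizeof(struct cn_msg) +
 *			     sizeof(enum proc_cn_mcast_op))];
 *	struct nlmsghdr *nl_hdr = (struct nlmsghdr *)buf;
 *	struct cn_msg *cn = (struct cn_msg *)NLMSG_DATA(nl_hdr);
 *	enum proc_cn_mcast_op *op = (enum proc_cn_mcast_op *)&cn->data[0];
 *
 *	memset(buf, 0, sizeof(buf));
 *	nl_hdr->nlmsg_len  = NLMSG_LENGTH(sizeof(*cn) + sizeof(*op));
 *	nl_hdr->nlmsg_pid  = getpid();
 *	nl_hdr->nlmsg_type = NLMSG_DONE;
 *	cn->id.idx = CN_IDX_PROC;
 *	cn->id.val = CN_VAL_PROC;
 *	cn->len    = sizeof(*op);
 *	*op        = PROC_CN_MCAST_LISTEN;
 *	send(sock, buf, nl_hdr->nlmsg_len, 0);
 *
 * The PROC_EVENT_NONE acknowledgement sent by cn_proc_ack() then arrives on
 * the same socket with event_data.ack.err == 0 on success.
 */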

/*
 * cn_proc_init - initialization entry point
 *
 * Adds the connector callback to the connector driver.
 */
static int __init cn_proc_init(void)
{
        int err;

        err = cn_add_callback(&cn_proc_event_id, "cn_proc",
                              &cn_proc_mcast_ctl);
        if (err) {
                printk(KERN_WARNING "cn_proc failed to register\n");
                return err;
        }
        return 0;
}
module_init(cn_proc_init);